diff --git a/README.md b/README.md index 74757e0..35558a3 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/siddontang/ledisdb.svg?branch=develop)](https://travis-ci.org/siddontang/ledisdb) -Ledisdb is a high-performance NoSQL database, similar to Redis, written in [Go](http://golang.org/). It supports many data structures including kv, list, hash, zset, bitmap,set. +Ledisdb is a high-performance NoSQL database, similar to Redis, written in [Go](http://golang.org/). It supports many data structures including kv, list, hash, zset, set. LedisDB now supports multiple different databases as backends. @@ -11,7 +11,7 @@ LedisDB now supports multiple different databases as backends. ## Features -+ Rich data structure: KV, List, Hash, ZSet, Bitmap, Set. ++ Rich data structure: KV, List, Hash, ZSet, Set. + Data storage is not limited by RAM. + Various backends supported: LevelDB, goleveldb, LMDB, RocksDB, BoltDB, RAM. + Supports transactions using LMDB or BotlDB. diff --git a/client/go/LICENSE b/client/LICENSE similarity index 100% rename from client/go/LICENSE rename to client/LICENSE diff --git a/client/go/ledis/client.go b/client/goledis/client.go similarity index 68% rename from client/go/ledis/client.go rename to client/goledis/client.go index bdc532f..a6811f4 100644 --- a/client/go/ledis/client.go +++ b/client/goledis/client.go @@ -2,6 +2,7 @@ package ledis import ( "container/list" + "net" "strings" "sync" ) @@ -46,11 +47,34 @@ func NewClient(cfg *Config) *Client { } func (c *Client) Do(cmd string, args ...interface{}) (interface{}, error) { - co := c.get() - r, err := co.Do(cmd, args...) - c.put(co) + var co *Conn + var err error + var r interface{} - return r, err + for i := 0; i < 2; i++ { + co, err = c.get() + if err != nil { + return nil, err + } + + r, err = co.Do(cmd, args...) 
+ if err != nil { + co.finalize() + + if e, ok := err.(*net.OpError); ok && strings.Contains(e.Error(), "use of closed network connection") { + //send to a closed connection, try again + continue + } + + return nil, err + } else { + c.put(co) + } + + return r, nil + } + + return nil, err } func (c *Client) Close() { @@ -66,11 +90,11 @@ func (c *Client) Close() { } } -func (c *Client) Get() *Conn { +func (c *Client) Get() (*Conn, error) { return c.get() } -func (c *Client) get() *Conn { +func (c *Client) get() (*Conn, error) { c.Lock() if c.conns.Len() == 0 { c.Unlock() @@ -83,7 +107,7 @@ func (c *Client) get() *Conn { c.Unlock() - return co + return co, nil } } diff --git a/client/go/ledis/conn.go b/client/goledis/conn.go similarity index 78% rename from client/go/ledis/conn.go rename to client/goledis/conn.go index 12f5f00..d78bc03 100644 --- a/client/go/ledis/conn.go +++ b/client/goledis/conn.go @@ -8,8 +8,6 @@ import ( "io" "net" "strconv" - "strings" - "sync" "time" ) @@ -19,50 +17,37 @@ type Error string func (err Error) Error() string { return string(err) } type Conn struct { - cm sync.Mutex - wm sync.Mutex - rm sync.Mutex - - closed bool - client *Client - addr string - c net.Conn br *bufio.Reader bw *bufio.Writer - rSize int - wSize int - // Scratch space for formatting argument length. // '*' or '$', length, "\r\n" lenScratch [32]byte // Scratch space for formatting integers and floats. 
numScratch [40]byte - - connectTimeout time.Duration } -func NewConn(addr string) *Conn { - co := new(Conn) - co.addr = addr - - co.rSize = 4096 - co.wSize = 4096 - - co.closed = false - - return co +func Connect(addr string) (*Conn, error) { + return ConnectWithSize(addr, 4096, 4096) } -func NewConnSize(addr string, readSize int, writeSize int) *Conn { - co := NewConn(addr) - co.rSize = readSize - co.wSize = writeSize - return co +func ConnectWithSize(addr string, readSize int, writeSize int) (*Conn, error) { + c := new(Conn) + + var err error + c.c, err = net.Dial(getProto(addr), addr) + if err != nil { + return nil, err + } + + c.br = bufio.NewReaderSize(c.c, readSize) + c.bw = bufio.NewWriterSize(c.c, writeSize) + + return c, nil } func (c *Conn) Close() { @@ -73,26 +58,12 @@ func (c *Conn) Close() { } } -func (c *Conn) SetConnectTimeout(t time.Duration) { - c.cm.Lock() - c.connectTimeout = t - c.cm.Unlock() -} - func (c *Conn) SetReadDeadline(t time.Time) { - c.cm.Lock() - if c.c != nil { - c.c.SetReadDeadline(t) - } - c.cm.Unlock() + c.c.SetReadDeadline(t) } func (c *Conn) SetWriteDeadline(t time.Time) { - c.cm.Lock() - if c.c != nil { - c.c.SetWriteDeadline(t) - } - c.cm.Unlock() + c.c.SetWriteDeadline(t) } func (c *Conn) Do(cmd string, args ...interface{}) (interface{}, error) { @@ -104,28 +75,6 @@ func (c *Conn) Do(cmd string, args ...interface{}) (interface{}, error) { } func (c *Conn) Send(cmd string, args ...interface{}) error { - var err error - for i := 0; i < 2; i++ { - if err = c.send(cmd, args...); err != nil { - if e, ok := err.(*net.OpError); ok && strings.Contains(e.Error(), "use of closed network connection") { - //send to a closed connection, try again - continue - } - } else { - return nil - } - } - return err -} - -func (c *Conn) send(cmd string, args ...interface{}) error { - if err := c.connect(); err != nil { - return err - } - - c.wm.Lock() - defer c.wm.Unlock() - if err := c.writeCommand(cmd, args); err != nil { c.finalize() return err 
@@ -139,9 +88,6 @@ func (c *Conn) send(cmd string, args ...interface{}) error { } func (c *Conn) Receive() (interface{}, error) { - c.rm.Lock() - defer c.rm.Unlock() - if reply, err := c.readReply(); err != nil { c.finalize() return nil, err @@ -155,9 +101,6 @@ func (c *Conn) Receive() (interface{}, error) { } func (c *Conn) ReceiveBulkTo(w io.Writer) error { - c.rm.Lock() - defer c.rm.Unlock() - err := c.readBulkReplyTo(w) if err != nil { if _, ok := err.(Error); !ok { @@ -168,44 +111,7 @@ func (c *Conn) ReceiveBulkTo(w io.Writer) error { } func (c *Conn) finalize() { - c.cm.Lock() - if !c.closed { - if c.c != nil { - c.c.Close() - } - c.closed = true - } - c.cm.Unlock() -} - -func (c *Conn) connect() error { - c.cm.Lock() - defer c.cm.Unlock() - - if !c.closed && c.c != nil { - return nil - } - - var err error - c.c, err = net.DialTimeout(getProto(c.addr), c.addr, c.connectTimeout) - if err != nil { - c.c = nil - return err - } - - if c.br != nil { - c.br.Reset(c.c) - } else { - c.br = bufio.NewReaderSize(c.c, c.rSize) - } - - if c.bw != nil { - c.bw.Reset(c.c) - } else { - c.bw = bufio.NewWriterSize(c.c, c.wSize) - } - - return nil + c.c.Close() } func (c *Conn) writeLen(prefix byte, n int) error { @@ -447,9 +353,12 @@ func (c *Conn) readReply() (interface{}, error) { return nil, errors.New("ledis: unexpected response line") } -func (c *Client) newConn(addr string) *Conn { - co := NewConnSize(addr, c.cfg.ReadBufferSize, c.cfg.WriteBufferSize) +func (c *Client) newConn(addr string) (*Conn, error) { + co, err := ConnectWithSize(addr, c.cfg.ReadBufferSize, c.cfg.WriteBufferSize) + if err != nil { + return nil, err + } co.client = c - return co + return co, nil } diff --git a/client/go/ledis/doc.go b/client/goledis/doc.go similarity index 100% rename from client/go/ledis/doc.go rename to client/goledis/doc.go diff --git a/client/go/ledis/garyburd_license b/client/goledis/garyburd_license similarity index 100% rename from client/go/ledis/garyburd_license rename to 
client/goledis/garyburd_license diff --git a/client/go/ledis/ledis_test.go b/client/goledis/ledis_test.go similarity index 100% rename from client/go/ledis/ledis_test.go rename to client/goledis/ledis_test.go diff --git a/client/go/ledis/reply.go b/client/goledis/reply.go similarity index 100% rename from client/go/ledis/reply.go rename to client/goledis/reply.go diff --git a/cmd/ledis-benchmark/main.go b/cmd/ledis-benchmark/main.go index 640607d..2c861c9 100644 --- a/cmd/ledis-benchmark/main.go +++ b/cmd/ledis-benchmark/main.go @@ -3,7 +3,7 @@ package main import ( "flag" "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "math/rand" "runtime" "strings" @@ -38,7 +38,7 @@ func bench(cmd string, f func(c *ledis.Conn)) { t1 := time.Now() for i := 0; i < *clients; i++ { go func() { - c := client.Get() + c, _ := client.Get() for j := 0; j < loop; j++ { f(c) } @@ -277,7 +277,7 @@ func main() { client = ledis.NewClient(cfg) for i := 0; i < *clients; i++ { - c := client.Get() + c, _ := client.Get() c.Close() } diff --git a/cmd/ledis-cli/const.go b/cmd/ledis-cli/const.go index a3e0674..df18fd6 100644 --- a/cmd/ledis-cli/const.go +++ b/cmd/ledis-cli/const.go @@ -1,28 +1,16 @@ -//This file was generated by .tools/generate_commands.py on Fri Feb 06 2015 09:15:18 +0800 +//This file was generated by .tools/generate_commands.py on Wed Mar 04 2015 09:31:59 +0800 package main var helpCommands = [][]string{ {"APPEND", "key value", "KV"}, - {"BCOUNT", "key [start end]", "Bitmap"}, - {"BDELETE", "key", "ZSet"}, {"BEGIN", "-", "Transaction"}, - {"BEXPIRE", "key seconds", "Bitmap"}, - {"BEXPIREAT", "key timestamp", "Bitmap"}, - {"BGET", "key", "Bitmap"}, - {"BGETBIT", "key offset", "Bitmap"}, {"BITCOUNT", "key [start] [end]", "KV"}, {"BITOP", "operation destkey key [key ...]", "KV"}, {"BITPOS", "key bit [start] [end]", "KV"}, {"BLPOP", "key [key ...] 
timeout", "List"}, - {"BMSETBIT", "key offset value [offset value ...]", "Bitmap"}, - {"BOPT", "operation destkey key [key ...]", "Bitmap"}, - {"BPERSIST", "key", "Bitmap"}, {"BRPOP", "key [key ...] timeout", "List"}, - {"BSETBIT", "key offset value", "Bitmap"}, - {"BTTL", "key", "Bitmap"}, - {"BXREVSCAN", "key [MATCH match] [COUNT count]", "Bitmap"}, - {"BXSCAN", "key [MATCH match] [COUNT count]", "Bitmap"}, {"COMMIT", "-", "Transaction"}, + {"CONFIG GET", "parameter", "Server"}, {"CONFIG REWRITE", "-", "Server"}, {"DECR", "key", "KV"}, {"DECRBY", "key decrement", "KV"}, @@ -50,6 +38,7 @@ var helpCommands = [][]string{ {"HGET", "key field", "Hash"}, {"HGETALL", "key", "Hash"}, {"HINCRBY", "key field increment", "Hash"}, + {"HKEYEXISTS", "key", "Hash"}, {"HKEYS", "key", "Hash"}, {"HLEN", "key", "Hash"}, {"HMCLEAR", "key [key ...]", "Hash"}, @@ -59,8 +48,6 @@ var helpCommands = [][]string{ {"HSET", "key field value", "Hash"}, {"HTTL", "key", "Hash"}, {"HVALS", "key", "Hash"}, - {"HXREVSCAN", "key [MATCH match] [COUNT count]", "Hash"}, - {"HXSCAN", "key [MATCH match] [COUNT count]", "Hash"}, {"INCR", "key", "KV"}, {"INCRBY", "key increment", "KV"}, {"INFO", "[section]", "Server"}, @@ -69,6 +56,7 @@ var helpCommands = [][]string{ {"LEXPIRE", "key seconds", "List"}, {"LEXPIREAT", "key timestamp", "List"}, {"LINDEX", "key index", "List"}, + {"LKEYEXISTS", "key", "List"}, {"LLEN", "key", "List"}, {"LMCLEAR", "key [key ...]", "List"}, {"LPERSIST", "key", "List"}, @@ -76,8 +64,6 @@ var helpCommands = [][]string{ {"LPUSH", "key value [value ...]", "List"}, {"LRANGE", "key start stop", "List"}, {"LTTL", "key", "List"}, - {"LXREVSCAN", "key [MATCH match] [COUNT count]", "List"}, - {"LXSCAN", "key [MATCH match] [COUNT count]", "List"}, {"MGET", "key [key ...]", "KV"}, {"MSET", "key value [key value ...]", "KV"}, {"PERSIST", "key", "KV"}, @@ -107,6 +93,7 @@ var helpCommands = [][]string{ {"SINTER", "key [key ...]", "Set"}, {"SINTERSTORE", "destination key [key ...]", "Set"}, 
{"SISMEMBER", "key member", "Set"}, + {"SKEYEXISTS", "key", "Set"}, {"SLAVEOF", "host port [RESTART] [READONLY]", "Replication"}, {"SMCLEAR", "key [key ...]", "Set"}, {"SMEMBERS", "key", "Set"}, @@ -116,23 +103,13 @@ var helpCommands = [][]string{ {"STTL", "key", "Set"}, {"SUNION", "key [key ...]", "Set"}, {"SUNIONSTORE", "destination key [key ...]", "Set"}, - {"SXREVSCAN", "key [MATCH match] [COUNT count]", "Set"}, - {"SXSCAN", "key [MATCH match] [COUNT count]", "Set"}, {"SYNC", "logid", "Replication"}, {"TIME", "-", "Server"}, {"TTL", "key", "KV"}, - {"XBREVSCAN", "key [MATCH match] [COUNT count]", "Bitmap"}, - {"XBSCAN", "key [MATCH match] [COUNT count]", "Bitmap"}, - {"XHREVSCAN", "key [MATCH match] [COUNT count]", "Hash"}, - {"XHSCAN", "key [MATCH match] [COUNT count]", "Hash"}, - {"XLREVSCAN", "key [MATCH match] [COUNT count]", "List"}, - {"XLSCAN", "key [MATCH match] [COUNT count]", "List"}, - {"XREVSCAN", "key [MATCH match] [COUNT count]", "KV"}, - {"XSCAN", "key [MATCH match] [COUNT count]", "KV"}, - {"XSREVSCAN", "key [MATCH match] [COUNT count]", "Set"}, - {"XSSCAN", "key [MATCH match] [COUNT count]", "Set"}, - {"XZREVSCAN", "key [MATCH match] [COUNT count]", "ZSet"}, - {"XZSCAN", "key [MATCH match] [COUNT count]", "ZSet"}, + {"XHSCAN", "key cursor [MATCH match] [COUNT count]", "Hash"}, + {"XSCAN", "type cursor [MATCH match] [COUNT count]", "Server"}, + {"XSSCAN", "key cursor [MATCH match] [COUNT count]", "Set"}, + {"XZSCAN", "key cursor [MATCH match] [COUNT count]", "ZSet"}, {"ZADD", "key score member [score member ...]", "ZSet"}, {"ZCARD", "key", "ZSet"}, {"ZCLEAR", "key", "ZSet"}, @@ -142,6 +119,7 @@ var helpCommands = [][]string{ {"ZEXPIREAT", "key timestamp", "ZSet"}, {"ZINCRBY", "key increment member", "ZSet"}, {"ZINTERSTORE", "destkey numkeys key [key ...] 
[WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]", "ZSet"}, + {"ZKEYEXISTS", "key", "ZSet"}, {"ZLEXCOUNT", "key min max", "ZSet"}, {"ZMCLEAR", "key [key ...]", "ZSet"}, {"ZPERSIST", "key", "ZSet"}, @@ -159,6 +137,4 @@ var helpCommands = [][]string{ {"ZSCORE", "key member", "ZSet"}, {"ZTTL", "key", "ZSet"}, {"ZUNIONSTORE", "destkey numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]", "ZSet"}, - {"ZXREVSCAN", "key [MATCH match] [COUNT count]", "ZSet"}, - {"ZXSCAN", "key [MATCH match] [COUNT count]", "ZSet"}, } diff --git a/cmd/ledis-cli/main.go b/cmd/ledis-cli/main.go index ba97b30..e66fc11 100644 --- a/cmd/ledis-cli/main.go +++ b/cmd/ledis-cli/main.go @@ -3,7 +3,7 @@ package main import ( "flag" "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "regexp" "strconv" "strings" diff --git a/cmd/ledis-dump/main.go b/cmd/ledis-dump/main.go index 87be176..b6dd99e 100644 --- a/cmd/ledis-dump/main.go +++ b/cmd/ledis-dump/main.go @@ -3,7 +3,7 @@ package main import ( "flag" "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "os" ) @@ -32,7 +32,11 @@ func main() { addr = fmt.Sprintf("%s:%d", *host, *port) } - c := ledis.NewConnSize(addr, 16*1024, 4096) + c, err := ledis.ConnectWithSize(addr, 16*1024, 4096) + if err != nil { + println(err.Error()) + return + } defer c.Close() diff --git a/cmd/ledis-server/main.go b/cmd/ledis-server/main.go index e2f246a..594a046 100644 --- a/cmd/ledis-server/main.go +++ b/cmd/ledis-server/main.go @@ -11,6 +11,7 @@ import ( "os" "os/signal" "runtime" + "strings" "syscall" ) @@ -64,7 +65,16 @@ func main() { cfg.UseReplication = true } else { cfg.Readonly = *readonly - cfg.UseReplication = *rpl + + // if rpl in command flag, use it. 
+ for _, arg := range os.Args { + arg := strings.ToLower(arg) + if arg == "-rpl" || arg == "-rpl=true" || arg == "-rpl=false" { + cfg.UseReplication = *rpl + break + } + } + cfg.Replication.Sync = *rplSync } diff --git a/config/config.go b/config/config.go index 92ea190..7c1346b 100644 --- a/config/config.go +++ b/config/config.go @@ -102,6 +102,8 @@ type Config struct { DataDir string `toml:"data_dir"` + Databases uint8 `toml:"databases"` + DBName string `toml:"db_name"` DBPath string `toml:"db_path"` DBSyncCommit int `toml:"db_sync_commit"` @@ -165,6 +167,9 @@ func NewConfigDefault() *Config { cfg.SlaveOf = "" cfg.Readonly = false + // default databases number + cfg.Databases = 16 + // disable access log cfg.AccessLog = "" @@ -209,7 +214,6 @@ func (cfg *Config) adjust() { cfg.ConnReadBufferSize = getDefault(4*KB, cfg.ConnReadBufferSize) cfg.ConnWriteBufferSize = getDefault(4*KB, cfg.ConnWriteBufferSize) cfg.TTLCheckInterval = getDefault(1, cfg.TTLCheckInterval) - } func (cfg *LevelDBConfig) adjust() { diff --git a/config/config.toml b/config/config.toml index 44b38d6..bcc3820 100644 --- a/config/config.toml +++ b/config/config.toml @@ -9,6 +9,11 @@ http_addr = "127.0.0.1:11181" # Data store path, all ledisdb's data will be saved here data_dir = "/tmp/ledis_server" +# Set the number of databases. You can use `select dbindex` to choose a db. +# dbindex must be in [0, databases - 1]. +# Maximum databases is 256. +databases = 16 + # Log server command, set empty to disable access_log = "" diff --git a/doc/DiffRedis.md b/doc/DiffRedis.md index a20cc1f..7722be3 100644 --- a/doc/DiffRedis.md +++ b/doc/DiffRedis.md @@ -5,10 +5,6 @@ so you can easily write your own LedisDB client based on a Redis one. Before you write a client, you must know some differences between LedisDB and Redis. -## Data Structure - -LedisDB has no Strings data type but KV and Bitmap, any some Keys and Strings commands in Redis will only affect KV data, and "bit" commands affect Bitmap. 
- ## Del In Redis, `del` can delete all type data, like String, Hash, List, etc, but in LedisDB, `del` can only delete KV data. To delete other type data, you will use "clear" commands. @@ -17,8 +13,7 @@ In Redis, `del` can delete all type data, like String, Hash, List, etc, but in L + Hash: `hclear`, `hmclear` + List: `lclear`, `lmclear` + Set: `sclear`, `smclear` -+ Zset: `zclear`, `zmclear` -+ Bitmap: `bclear`, `bmclear` ++ ZSet: `zclear`, `zmclear` ## Expire, Persist, and TTL @@ -29,7 +24,6 @@ The same for Del. + List: `lexpire`, `lpersist`, `lttl` + Set: `sexpire`, `spersist`, `sttl` + Zset: `zexpire`, `zpersist`, `zttl` -+ Bitmap: `bexpire`, `bpersist`, `bttl` ## ZSet @@ -47,14 +41,14 @@ Transaction API: ## Scan -LedisDB supplies `xscan`, `xrevscan`, etc, to fetch data iteratively and reverse iteratively. +LedisDB supplies `xscan`, `xhscan`, `xsscan`, `xzscan` to fetch data iteratively and reverse iteratively. -+ KV: `xscan`, `xrevscan` -+ Hash: `hxscan`, `hxrevscan`, `xhscan`, `xhrevscan` -+ List: `lxscan`, `lxrevscan`, `xlscan`, `xlrevscan` -+ Set: `sxscan` , `sxrevscan`, `xsscan`, `xsrevscan` -+ Zset: `zxscan`, `zxrevscan`, `xzscan`, `xzrevscan` -+ Bitmap: `bxscan`, `bxrevscan`, `xbscan`, `xbrevscan` +``` +XSCAN type cursor [MATCH match] [COUNT count] +XHSCAN key cursor [MATCH match] [COUNT count] +XSSCAN key cursor [MATCH match] [COUNT count] +XZSCAN key cursor [MATCH match] [COUNT count] +``` ## DUMP diff --git a/doc/commands.json b/doc/commands.json index 4b549db..1ae730a 100644 --- a/doc/commands.json +++ b/doc/commands.json @@ -1,59 +1,4 @@ { - "BCOUNT": { - "arguments": "key [start end]", - "group": "Bitmap", - "readonly": true - }, - "BDELETE": { - "arguments": "key", - "group": "ZSet", - "readonly": false - }, - "BEXPIRE": { - "arguments": "key seconds", - "group": "Bitmap", - "readonly": false - }, - "BEXPIREAT": { - "arguments": "key timestamp", - "group": "Bitmap", - "readonly": false - }, - "BGET": { - "arguments": "key", - "group": "Bitmap", - 
"readonly": true - }, - "BGETBIT": { - "arguments": "key offset", - "group": "Bitmap", - "readonly": true - }, - "BMSETBIT": { - "arguments": "key offset value [offset value ...]", - "group": "Bitmap", - "readonly": false - }, - "BOPT": { - "arguments": "operation destkey key [key ...]", - "group": "Bitmap", - "readonly": false - }, - "BPERSIST": { - "arguments": "key", - "group": "Bitmap", - "readonly": false - }, - "BSETBIT": { - "arguments": "key offset value", - "group": "Bitmap", - "readonly": false - }, - "BTTL": { - "arguments": "key", - "group": "Bitmap", - "readonly": true - }, "DECR": { "arguments": "key", "group": "KV", @@ -560,79 +505,7 @@ "group": "Transaction", "readonly": false }, - - "XSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "KV", - "readonly": true - }, - - "HXSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Hash", - "readonly": true - }, - - "LXSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "List", - "readonly": true - }, - - "SXSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Set", - "readonly": true - }, - - "ZXSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "ZSet", - "readonly": true - }, - - "BXSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Bitmap", - "readonly": true - }, - - "XREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "KV", - "readonly": true - }, - - "HXREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Hash", - "readonly": true - }, - - "LXREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "List", - "readonly": true - }, - - "SXREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Set", - "readonly": true - }, - - "ZXREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "ZSet", - "readonly": true - }, - - "BXREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": 
"Bitmap", - "readonly": true - }, - + "FLUSHALL": { "arguments": "-", "group": "Server", @@ -693,6 +566,12 @@ "readonly": false }, + "CONFIG GET": { + "arguments" : "parameter", + "group": "Server", + "readonly": true + }, + "DUMP": { "arguments" : "key", "group": "KV", @@ -724,66 +603,30 @@ "readonly": true }, - "XBSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Bitmap", - "readonly": true - }, - - "XLSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "List", + "XSCAN": { + "arguments": "type cursor [MATCH match] [COUNT count]", + "group": "Server", "readonly": true }, "XHSCAN": { - "arguments": "key [MATCH match] [COUNT count]", + "arguments": "key cursor [MATCH match] [COUNT count]", "group": "Hash", "readonly": true }, "XSSCAN": { - "arguments": "key [MATCH match] [COUNT count]", + "arguments": "key cursor [MATCH match] [COUNT count]", "group": "Set", "readonly": true }, "XZSCAN": { - "arguments": "key [MATCH match] [COUNT count]", + "arguments": "key cursor [MATCH match] [COUNT count]", "group": "ZSet", "readonly": true }, - "XHREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Hash", - "readonly": true - }, - - "XLREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "List", - "readonly": true - }, - - "XSREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Set", - "readonly": true - }, - - "XZREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "ZSet", - "readonly": true - }, - - "XBREVSCAN": { - "arguments": "key [MATCH match] [COUNT count]", - "group": "Bitmap", - "readonly": true - }, - "RESTORE": { "arguments" : "key ttl value", "group" : "Server", @@ -848,5 +691,29 @@ "arguments" : "key offset value", "group" : "KV", "readonly" : false + }, + + "HKEYEXISTS": { + "arguments" : "key", + "group" : "Hash", + "readonly" : true + }, + + "LKEYEXISTS": { + "arguments" : "key", + "group" : "List", + "readonly" : true + }, + + "SKEYEXISTS": 
{ + "arguments" : "key", + "group" : "Set", + "readonly" : true + }, + + "ZKEYEXISTS": { + "arguments" : "key", + "group" : "ZSet", + "readonly" : true + } } diff --git a/doc/commands.md b/doc/commands.md index 1cde082..a5f33d7 100644 --- a/doc/commands.md +++ b/doc/commands.md @@ -1,175 +1,161 @@ ## Summary -ledisdb use redis protocol called RESP(REdis Serialization Protocol), [here](http://redis.io/topics/protocol). +Ledisdb use redis protocol called RESP(REdis Serialization Protocol), [here](http://redis.io/topics/protocol). -ledisdb all commands return RESP format and it will use `int64` instead of `RESP integer`, `string` instead of `RESP simple string`, `bulk string` instead of `RESP bulk string`, and `array` instead of `RESP arrays` below. +Ledisdb all commands return RESP format and it will use `int64` instead of `RESP integer`, `string` instead of `RESP simple string`, `bulk string` instead of `RESP bulk string`, and `array` instead of `RESP arrays` below. -Table of Contents -================= +Most of the Ledisdb's commands are the same as Redis's, you can see the redis commands document for detailed information too. 
+ + +**Commands List** -- [Summary](#summary) - [KV](#kv) - - [DECR key](#decr-key) - - [DECRBY key decrement](#decrby-key-decrement) - - [DEL key [key ...]](#del-key-key-) - - [EXISTS key](#exists-key) - - [GET key](#get-key) - - [GETSET key value](#getset-key-value) - - [INCR key](#incr-key) - - [INCRBY key increment](#incrby-key-increment) - - [MGET key [key ...]](#mget-key-key-) - - [MSET key value [key value ...]](#mset-key-value-key-value-) - - [SET key value](#set-key-value) - - [SETNX key value](#setnx-key-value) - - [SETEX key seconds value](#setex-key-seconds-value) - - [EXPIRE key seconds](#expire-key-seconds) - - [EXPIREAT key timestamp](#expireat-key-timestamp) - - [TTL key](#ttl-key) - - [PERSIST key](#persist-key) - - [XSCAN key [MATCH match] [COUNT count]](#xscan-key-match-match-count-count) - - [XREVSCAN key [MATCH match] [COUNT count]](#xrevscan-key-match-match-count-count) - - [DUMP key](#dump-key) + - [DECR key](#decr-key) + - [DECRBY key decrement](#decrby-key-decrement) + - [DEL key [key ...]](#del-key-key-) + - [EXISTS key](#exists-key) + - [GET key](#get-key) + - [GETSET key value](#getset-key-value) + - [INCR key](#incr-key) + - [INCRBY key increment](#incrby-key-increment) + - [MGET key [key ...]](#mget-key-key-) + - [MSET key value [key value ...]](#mset-key-value-key-value-) + - [SET key value](#set-key-value) + - [SETNX key value](#setnx-key-value) + - [SETEX key seconds value](#setex-key-seconds-value) + - [EXPIRE key seconds](#expire-key-seconds) + - [EXPIREAT key timestamp](#expireat-key-timestamp) + - [TTL key](#ttl-key) + - [PERSIST key](#persist-key) + - [DUMP key](#dump-key) + - [APPEND key value](#append-key-value) + - [GETRANGE key start end](#getrange-key-start-end) + - [SETRANGE key offset value](#setrange-key-offset-value) + - [STRLEN key](#strlen-key) + - [BITCOUNT key [start] [end]](#bitcount-key-start-end) + - [BITOP operation destkey key [key ...]](#bitop-operation-destkey-key-key-) + - [BITPOS key bit [start] 
[end]](#bitpos-key-bit-start-end) + - [GETBIT key offset](#getbit-key-offset) + - [SETBIT key offset value](#setbit-key-offset-value) - [Hash](#hash) - - [HDEL key field [field ...]](#hdel-key-field-field-) - - [HEXISTS key field](#hexists-key-field) - - [HGET key field](#hget-key-field) - - [HGETALL key](#hgetall-key) - - [HINCRBY key field increment](#hincrby-key-field-increment) - - [HKEYS key](#hkeys-key) - - [HLEN key](#hlen-key) - - [HMGET key field [field ...]](#hmget-key-field-field-) - - [HMSET key field value [field value ...]](#hmset-key-field-value-field-value-) - - [HSET key field value](#hset-key-field-value) - - [HVALS key](#hvals-key) - - [HCLEAR key](#hclear-key) - - [HMCLEAR key [key ...]](#hmclear-key-key) - - [HEXPIRE key seconds](#hexpire-key-seconds) - - [HEXPIREAT key timestamp](#hexpireat-key-timestamp) - - [HTTL key](#httl-key) - - [HPERSIST key](#hpersist-key) - - [HXSCAN key [MATCH match] [COUNT count]](#hxscan-key-match-match-count-count) - - [HXREVSCAN key [MATCH match] [COUNT count]](#hxrevscan-key-match-match-count-count) - - [XHSCAN key [MATCH match] [COUNT count]](#xhscan-key-match-match-count-count) - - [XHREVSCAN key [MATCH match] [COUNT count]](#xhrevscan-key-match-match-count-count) - - [HDUMP key](#hdump-key) + - [HDEL key field [field ...]](#hdel-key-field-field-) + - [HEXISTS key field](#hexists-key-field) + - [HGET key field](#hget-key-field) + - [HGETALL key](#hgetall-key) + - [HINCRBY key field increment](#hincrby-key-field-increment) + - [HKEYS key](#hkeys-key) + - [HLEN key](#hlen-key) + - [HMGET key field [field ...]](#hmget-key-field-field-) + - [HMSET key field value [field value ...]](#hmset-key-field-value-field-value-) + - [HSET key field value](#hset-key-field-value) + - [HVALS key](#hvals-key) + - [HCLEAR key](#hclear-key) + - [HMCLEAR key [key...]](#hmclear-key-key) + - [HEXPIRE key seconds](#hexpire-key-seconds) + - [HEXPIREAT key timestamp](#hexpireat-key-timestamp) + - [HTTL key](#httl-key) + - [HPERSIST 
key](#hpersist-key) + - [HDUMP key](#hdump-key) + - [HKEYEXISTS key](#hkeyexists-key) - [List](#list) - - [BLPOP key [key ...] timeout](#blpop-key-key--timeout) - - [BRPOP key [key ...] timeout](#brpop-key-key--timeout) - - [LINDEX key index](#lindex-key-index) - - [LLEN key](#llen-key) - - [LPOP key](#lpop-key) - - [LRANGE key start stop](#lrange-key-start-stop) - - [LPUSH key value [value ...]](#lpush-key-value-value-) - - [RPOP key](#rpop-keuser-content-y) - - [RPUSH key value [value ...]](#rpush-key-value-value-) - - [LCLEAR key](#lclear-key) - - [LMCLEAR key [key...]](#lmclear-key-key-) - - [LEXPIRE key seconds](#lexpire-key-seconds) - - [LEXPIREAT key timestamp](#lexpireat-key-timestamp) - - [LTTL key](#lttl-key) - - [LPERSIST key](#lpersist-key) - - [LXSCAN key [MATCH match] [COUNT count]](#lxscan-key-match-match-count-count) - - [LXREVSCAN key [MATCH match] [COUNT count]](#lxrevscan-key-match-match-count-count) - - [XLSCAN key [MATCH match] [COUNT count]](#xlscan-key-match-match-count-count) - - [XLREVSCAN key [MATCH match] [COUNT count]](#xlrevscan-key-match-match-count-count) - - [LDUMP key](#ldump-key) + - [BLPOP key [key ...] timeout](#blpop-key-key--timeout) + - [BRPOP key [key ...] 
timeout](#brpop-key-key--timeout) + - [LINDEX key index](#lindex-key-index) + - [LLEN key](#llen-key) + - [LPOP key](#lpop-key) + - [LRANGE key start stop](#lrange-key-start-stop) + - [LPUSH key value [value ...]](#lpush-key-value-value-) + - [RPOP key](#rpop-key) + - [RPUSH key value [value ...]](#rpush-key-value-value-) + - [LCLEAR key](#lclear-key) + - [LMCLEAR key [key ...]](#lmclear-key-key-) + - [LEXPIRE key seconds](#lexpire-key-seconds) + - [LEXPIREAT key timestamp](#lexpireat-key-timestamp) + - [LTTL key](#lttl-key) + - [LPERSIST key](#lpersist-key) + - [LDUMP key](#ldump-key) + - [LKEYEXISTS key](#lkeyexists-key) - [Set](#set) - - [SADD key member [member ...]](#sadd-key-member-member-) - - [SCARD key](#scard-key) - - [SDIFF key [key ...]](#sdiff-key-key-) - - [SDIFFSTORE destination key [key ...]](#sdiffstore-destination-key-key-) - - [SINTER key [key ...]](#sinter-key-key-) - - [SINTERSTORE destination key [key ...]](#sinterstore-destination-key-key-) - - [SISMEMBER key member](#sismember-key-member) - - [SMEMBERS key](#smembers-key) - - [SREM key member [member ...]](#srem-key-member-member-) - - [SUNION key [key ...]](#sunion-key-key-) - - [SUNIONSTORE destination key [key ...]](#sunionstore-destination-key-key-) - - [SCLEAR key](#sclear-key) - - [SMCLEAR key [key...]](#smclear-key-key) - - [SEXPIRE key seconds](#sexpire-key-seconds) - - [SEXPIREAT key timestamp](#sexpireat-key-timestamp) - - [STTL key](#sttl-key) - - [SPERSIST key](#spersist-key) - - [SXSCAN key [MATCH match] [COUNT count]](#sxscan-key-match-match-count-count) - - [SXREVSCAN key [MATCH match] [COUNT count]](#sxrevscan-key-match-match-count-count) - - [XSSCAN key [MATCH match] [COUNT count]](#xsscan-key-match-match-count-count) - - [XSREVSCAN key [MATCH match] [COUNT count]](#xsrevscan-key-match-match-count-count) - - [SDUMP key](#sdump-key) + - [SADD key member [member ...]](#sadd-key-member-member-) + - [SCARD key](#scard-key) + - [SDIFF key [key ...]](#sdiff-key-key-) + - 
[SDIFFSTORE destination key [key ...]](#sdiffstore-destination-key-key-) + - [SINTER key [key ...]](#sinter-key-key-) + - [SINTERSTORE destination key [key ...]](#sinterstore--destination-key-key-) + - [SISMEMBER key member](#sismember--key-member) + - [SMEMBERS key](#smembers-key) + - [SREM key member [member ...]](#srem--key-member-member-) + - [SUNION key [key ...]](#sunion-key-key-) + - [SUNIONSTORE destination key [key]](#sunionstore-destination-key-key) + - [SCLEAR key](#sclear-key) + - [SMCLEAR key [key ...]](#smclear-key-key-) + - [SEXPIRE key seconds](#sexpire-key-seconds) + - [SEXPIREAT key timestamp](#sexpireat-key-timestamp) + - [STTL key](#sttl-key) + - [SPERSIST key](#spersist-key) + - [SDUMP key](#sdump-key) + - [SKEYEXISTS key](#skeyexists-key) - [ZSet](#zset) - - [ZADD key score member [score member ...]](#zadd-key-score-member-score-member-) - - [ZCARD key](#zcard-key) - - [ZCOUNT key min max](#zcount-key-min-max) - - [ZINCRBY key increment member](#zincrby-key-increment-member) - - [ZRANGE key start stop [WITHSCORES]](#zrange-key-start-stop-withscores) - - [ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]](#zrangebyscore-key-min-max-withscores-limit-offset-count) - - [ZRANK key member](#zrank-key-member) - - [ZREM key member [member ...]](#zrem-key-member-member-) - - [ZREMRANGEBYRANK key start stop](#zremrangebyrank-key-start-stop) - - [ZREMRANGEBYSCORE key min max](#zremrangebyscore-key-min-max) - - [ZREVRANGE key start stop [WITHSCORES]](#zrevrange-key-start-stop-withscores) - - [ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count]](#zrevrangebyscore-key-max-min-withscores-limit-offset-count) - - [ZREVRANK key member](#zrevrank-key-member) - - [ZSCORE key member](#zscore-key-member) - - [ZCLEAR key](#zclear-key) - - [ZMCLEAR key [key ...]](#zmclear-key-key-) - - [ZEXPIRE key seconds](#zexpire-key-seconds) - - [ZEXPIREAT key timestamp](#zexpireat-key-timestamp) - - [ZTTL key](#zttl-key) - - [ZPERSIST key](#zpersist-key) - - 
[ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] -](#zunionstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax) - - [ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] -](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax) - - [ZXSCAN key [MATCH match] [COUNT count]](#zxscan-key-match-match-count-count) - - [ZXREVSCAN key [MATCH match] [COUNT count]](#zxrevscan-key-match-match-count-count) - - [XZSCAN key [MATCH match] [COUNT count]](#xzscan-key-match-match-count-count) - - [XZREVSCAN key [MATCH match] [COUNT count]](#xzrevscan-key-match-match-count-count) - - [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count) - - [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max) - - [ZLEXCOUNT key min max](#zlexcount-key-min-max) - - [ZDUMP key](#zdump-key) -- [Bitmap](#bitmap) - - [BGET key](#bget-key) - - [BGETBIT key offset](#bgetbit-key-offset) - - [BSETBIT key offset value](#bsetbit-key-offset-value) - - [BMSETBIT key offset value[offset value ...]](#bmsetbit-key-offset-value-offset-value-) - - [BOPT operation destkey key [key ...]](#bopt-operation-destkey-key-key-) - - [BCOUNT key [start, end]](#bcount-key-start-end) - - [BEXPIRE key seconds](#bexpire-key-seconds) - - [BEXPIREAT key timestamp](#bexpireat-key-timestamp) - - [BTTL key](#bttl-key) - - [BPERSIST key](#bpersist-key) - - [BXSCAN key [MATCH match] [COUNT count]](#bxscan-key-match-match-count-count) - - [BXREVSCAN key [MATCH match] [COUNT count]](#bxrevscan-key-match-match-count-count) - - [XBSCAN key [MATCH match] [COUNT count]](#xbscan-key-match-match-count-count) - - [XBREVSCAN key [MATCH match] [COUNT count]](#xbrevscan-key-match-match-count-count) + - [ZADD key score member [score member ...]](#zadd-key-score-member-score-member-) + - [ZCARD key](#zcard-key) + - [ZCOUNT key min max](#zcount-key-min-max) + 
- [ZINCRBY key increment member](#zincrby-key-increment-member) + - [ZRANGE key start stop [WITHSCORES]](#zrange-key-start-stop-withscores) + - [ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]](#zrangebyscore-key-min-max-withscores-limit-offset-count) + - [ZRANK key member](#zrank-key-member) + - [ZREM key member [member ...]](#zrem-key-member-member-) + - [ZREMRANGEBYRANK key start stop](#zremrangebyrank-key-start-stop) + - [ZREMRANGEBYSCORE key min max](#zremrangebyscore-key-min-max) + - [ZREVRANGE key start stop [WITHSCORES]](#zrevrange-key-start-stop-withscores) + - [ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count]](#zrevrangebyscore--key-max-min-withscores-limit-offset-count) + - [ZREVRANK key member](#zrevrank-key-member) + - [ZSCORE key member](#zscore-key-member) + - [ZCLEAR key](#zclear-key) + - [ZMCLEAR key [key ...]](#zmclear-key-key-) + - [ZEXPIRE key seconds](#zexpire-key-seconds) + - [ZEXPIREAT key timestamp](#zexpireat-key-timestamp) + - [ZTTL key](#zttl-key) + - [ZPERSIST key](#zpersist-key) + - [ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]](#zunionstore-destination-numkeys-key-key--weights-weight-weight--aggregate-sum|min|max) + - [ZINTERSTORE destination numkeys key [key ...] 
[WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-sum|min|max) + - [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count) + - [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max) + - [ZLEXCOUNT key min max](#zlexcount-key-min-max) + - [ZDUMP key](#zdump-key) + - [ZKEYEXISTS key](#zkeyexists-key) +- [Scan](#scan) + - [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count) + - [XHSCAN key cursor [MATCH match] [COUNT count]](#xhscan-key-cursor-match-match-count-count) + - [XSSCAN key cursor [MATCH match] [COUNT count]](#xsscan-key-cursor-match-match-count-count) + - [XZSCAN key cursor [MATCH match] [COUNT count]](#xzscan-key-cursor-match-match-count-count) - [Replication](#replication) - - [SLAVEOF host port [RESTART] [READONLY]](#slaveof-host-port-restart-readonly) - - [FULLSYNC [NEW]](#fullsync-new) - - [SYNC logid](#sync-logid) + - [SLAVEOF host port [RESTART] [READONLY]](#slaveof-host-port-restart-readonly) + - [FULLSYNC [NEW]](#fullsync-new) + - [SYNC logid](#sync-logid) - [Server](#server) - - [PING](#ping) - - [ECHO message](#echo-message) - - [SELECT index](#select-index) - - [FLUSHALL](#flushall) - - [FLUSHDB](#flushdb) - - [INFO [section]](#info-section) - - [TIME](#time) - - [CONFIG REWRITE](#config-rewrite) - - [RESTORE key ttl value](#restore-key-ttl-value) - - [ROLE](#role) + - [PING](#ping) + - [ECHO message](#echo-message) + - [SELECT index](#select-index) + - [FLUSHALL](#flushall) + - [FLUSHDB](#flushdb) + - [INFO [section]](#info-section) + - [TIME](#time) + - [CONFIG REWRITE](#config-rewrite) + - [RESTORE key ttl value](#restore-key-ttl-value) + - [ROLE](#role) - [Transaction](#transaction) - - [BEGIN](#begin) - - [ROLLBACK](#rollback) - - [COMMIT](#commit) + - [BEGIN](#begin) + - [ROLLBACK](#rollback) + - [COMMIT](#commit) - [Script](#script) - - [EVAL script numkeys key [key ...] 
arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-) - - [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-) - - [SCRIPT LOAD script](#script-load-script) - - [SCRIPT EXISTS script [script ...]](#script-exists-script-script-) - - [SCRIPT FLUSH](#script-flush) + - [EVAL script numkeys key [key ...] arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-) + - [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-) + - [SCRIPT LOAD script](#script-load-script) + - [SCRIPT EXISTS script [script ...]](#script-exists-script-script-) + - [SCRIPT FLUSH](#script-flush) + + ## KV @@ -524,82 +510,6 @@ ledis> TTL mykey (integer) -1 ``` -### XSCAN key [MATCH match] [COUNT count] - -Iterate KV keys incrementally. - -Key is the start for the current iteration. -Match is the regexp for checking matched key. -Count is the maximum retrieved elememts number, default is 10. - -**Return value** - -an array of two values, first value is the key for next iteration, second value is an array of elements. - -**Examples** - -``` -ledis>set a 1 -OK -ledis>set b 2 -OK -ledis>set c 3 -OK -127.0.0.1:6380>xscan "" -1) "" -2) ["a" "b" "c"] -ledis>xscan "" count 1 -1) "a" -2) ["a"] -ledis>xscan "a" count 1 -1) "b" -2) ["b"] -ledis>xscan "b" count 1 -1) "c" -2) ["c"] -ledis>xscan "c" count 1 -1) "" -2) [] -``` - -### XREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate KV keys incrementally. - -Key is the start for the current iteration. -Match is the regexp for checking matched key. -Count is the maximum retrieved elememts number, default is 10. - -**Return value** - -an array of two values, first value is the key for next iteration, second value is an array of elements. 
- -**Examples** - -``` -ledis>set a 1 -OK -ledis>set b 2 -OK -ledis>set c 3 -OK -127.0.0.1:6380>xrevscan "" -1) "" -2) ["c" "b" "a"] -ledis>xrevscan "" count 1 -1) "c" -2) ["c"] -ledis>xrevscan "c" count 1 -1) "b" -2) ["b"] -ledis>xrevscan "b" count 1 -1) "a" -2) ["a"] -ledis>xrevscan "a" count 1 -1) "" -2) [] -``` - ### DUMP key Serialize the value stored at key with KV type in a Redis-specific format like RDB and return it to the user. The returned value can be synthesized back into a key using the RESTORE command. @@ -617,6 +527,25 @@ ledis>DUMP mykey "\x00\xc0\n\x06\x00\xf8r?\xc5\xfb\xfb_(" ``` +### APPEND key value + +### GETRANGE key start end + +### SETRANGE key offset value + +### STRLEN key + +### BITCOUNT key [start] [end] + +### BITOP operation destkey key [key ...] + +### BITPOS key bit [start] [end] + +### GETBIT key offset + +### SETBIT key offset value + + ## Hash ### HDEL key field [field ...] @@ -980,35 +909,14 @@ ledis> HPERSIST not_exists_key (integer) 0 ``` -### HXSCAN key [MATCH match] [COUNT count] - -Iterate Hash keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### HXREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate Hash keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - -### XHSCAN key [MATCH match] [COUNT count] - -Iterate Hash keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### XHREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate Hash keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - - ### HDUMP key See [DUMP](#dump-key) for more information. +### HKEYEXISTS key + +Check key exists for hash data, like [EXISTS key](#exists-key) + ## List ### BLPOP key [key ...] timeout @@ -1331,35 +1239,13 @@ ledis> LPERSIST b (integer) 0 ``` -### LXSCAN key [MATCH match] [COUNT count] - -Iterate list keys incrementally. 
- -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### LXREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate list keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - -### XLSCAN key [MATCH match] [COUNT count] - -Iterate list keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### XLREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate list keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - - ### LDUMP key See [DUMP](#dump-key) for more information. +### LKEYEXISTS key + +Check key exists for list data, like [EXISTS key](#exists-key) ## Set @@ -1782,37 +1668,14 @@ ledis> STTL key (integer) -1 ``` -### SXSCAN key [MATCH match] [COUNT count] - -Iterate Set keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - - -### SXREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate Set keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - -### XSSCAN key [MATCH match] [COUNT count] - -Iterate Set keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - - -### XSREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate Set keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - - ### SDUMP key See [DUMP](#dump-key) for more information. +### SKEYEXISTS key + +Check key exists for set data, like [EXISTS key](#exists-key) + ## ZSet ### ZADD key score member [score member ...] @@ -2432,30 +2295,6 @@ ledis> ZRANGE out 0 -1 WITHSCORES 4) "10" ``` -### ZXSCAN key [MATCH match] [COUNT count] - -Iterate ZSet keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### ZXREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate ZSet keys incrementally. 
- -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - -### XZSCAN key [MATCH match] [COUNT count] - -Iterate ZSet keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### XZREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate ZSet keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - ### ZRANGEBYLEX key min max [LIMIT offset count] When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max. @@ -2526,187 +2365,64 @@ ledis> ZLEXCOUNT myzset - [c See [DUMP](#dump-key) for more information. -## Bitmap +### ZKEYEXISTS key -### BGET key +Check key exists for zset data, like [EXISTS key](#exists-key) -Returns the whole binary data stored at `key`. +## Scan + +### XSCAN type cursor [MATCH match] [COUNT count] + +Iterate data type keys incrementally. + +Type is "KV", "LIST", "HASH", "SET" or "ZSET". +Cursor is the start for the current iteration. +Match is the regexp for checking matched key. +Count is the maximum retrieved elememts number, default is 10. **Return value** -bulk: the raw value of key, or nil when key does not exist. +an array of two values, first value is the cursor for next iteration, second value is an array of elements. 
**Examples** ``` -ledis> BMSETBIT flag 0 1 5 1 6 1 -(integer) 3 -ledis> BGET flag -a +ledis>set a 1 +OK +ledis>set b 2 +OK +ledis>set c 3 +OK +127.0.0.1:6380>xscan "KV" "" +1) "" +2) ["a" "b" "c"] +ledis>xscan "KV" "" count 1 +1) "a" +2) ["a"] +ledis>xscan "KV" "a" count 1 +1) "b" +2) ["b"] +ledis>xscan "KV" "b" count 1 +1) "c" +2) ["c"] +ledis>xscan "KV" "c" count 1 +1) "" +2) [] ``` +### XHSCAN key cursor [MATCH match] [COUNT count] -### BGETBIT key offset +Same like [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count), but return array of elements +contains two elements, a field and a value. -Returns the bit value at `offset` in the string value stored at `key`. -When *offset* beyond the data length, ot the target data is not exist, the bit value will be 0 always. +### XSSCAN key cursor [MATCH match] [COUNT count] -**Return value** +Same like [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count) -int64 : the bit value stored at offset. +### XZSCAN key cursor [MATCH match] [COUNT count] -**Examples** - -``` -ledis> BSETBIT flag 1024 1 -(integer) 0 -ledis> BGETBIT flag 0 -(integer) 0 -ledis> BGETBIT flag 1024 -(integer) 1 -ledis> BGETBIT flag 65535 -(integer) 0 -``` - - -### BSETBIT key offset value - -Sets or clear the bit at `offset` in the binary data sotred at `key`. -The bit is either set or cleared depending on `value`, which can be either `0` or `1`. -The *offset* argument is required to be qual to 0, and smaller than -2^23 (this means bitmap limits to 8MB). - -**Return value** - -int64 : the original bit value stored at offset. - -**Examples** - -``` -ledis> BSETBIT flag 0 1 -(integer) 0 -ledis> BSETBIT flag 0 0 -(integer) 1 -ledis> BGETBIT flag 0 99 -ERR invalid command param -``` - -### BMSETBIT key offset value [offset value ...] -Sets the given *offset* to their respective values. 
- -**Return value** - -int64 : The number of input *offset* - -**Examples** - -``` -ledis> BMSETBIT flag 0 1 1 1 2 0 3 1 -(integer) 4 -ledis> BCOUNT flag -(integer) 3 -``` - - -### BOPT operation destkey key [key ...] -Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. - -**Return value** - -Int64: -The size of the string stored in the destination key, that is equal to the size of the longest input string. -**Examples** - -``` -ledis> BMSETBIT a 0 1 2 1 -(integer) 2 -ledis> BMSETBIT b 1 1 -(integer) 1 -ledis> BOPT AND res a b -(integer) 3 -ledis> BCOUNT res -(integer) 0 -ledis> BOPT OR res2 a b -(integer) 3 -ledis> BCOUNT res2 -(integer) 3 -ledis> BOPT XOR res3 a b -(integer) 3 -ledis> BCOUNT res3 -(integer) 3 -``` - -### BCOUNT key [start end] - -Count the number of set bits in a bitmap. - -**Return value** - -int64 : The number of bits set to 1. - -**Examples** - -``` -ledis> BMSETBIT flag 0 1 5 1 6 1 -(integer) 3 -ledis> BGET flag -a -ledis> BCOUNT flag -(integer) 3 -ledis> BCOUNT flag 0 0s -(integer) 1 -ledis> BCOUNT flag 0 4 -(integer) 1 -ledis> BCOUNT flag 0 5 -(integer) 2 -ledis> BCOUNT flag 5 6 -(integer) 2 -``` - - -### BEXPIRE key seconds - -(refer to [EXPIRE](#expire-key-seconds) api for other types) - - -### BEXPIREAT key timestamp - -(refer to [EXPIREAT](#expireat-key-timestamp) api for other types) - - -### BTTL key - -(refer to [TTL](#ttl-key) api for other types) - - -### BPERSIST key - -(refer to [PERSIST](#persist-key) api for other types) - - -### BXSCAN key [MATCH match] [COUNT count] - -Iterate Bitmap keys incrementally. - -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### BXREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate Bitmap keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. - -### XBSCAN key [MATCH match] [COUNT count] - -Iterate Bitmap keys incrementally. 
- -See [XSCAN](#xscan-key-match-match-count-count) for more information. - -### XBREVSCAN key [MATCH match] [COUNT count] - -Reverse iterate Bitmap keys incrementally. - -See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information. +Same like [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count), but return array of elements +contains two elements, a member and its associated score. ## Replication diff --git a/doc/doctoc_commands.sh b/doc/doctoc_commands.sh new file mode 100755 index 0000000..bde1869 --- /dev/null +++ b/doc/doctoc_commands.sh @@ -0,0 +1 @@ +doctoc --title '**Commands List**' commands.md \ No newline at end of file diff --git a/etc/ledis.conf b/etc/ledis.conf index 44b38d6..bcc3820 100644 --- a/etc/ledis.conf +++ b/etc/ledis.conf @@ -9,6 +9,11 @@ http_addr = "127.0.0.1:11181" # Data store path, all ledisdb's data will be saved here data_dir = "/tmp/ledis_server" +# Set the number of databases. You can use `select dbindex` to choose a db. +# dbindex must be in [0, databases - 1]. +# Maximum databases is 256. 
+databases = 16 + # Log server command, set empty to disable access_log = "" diff --git a/ledis/const.go b/ledis/const.go index 5d4b587..c6461db 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -6,20 +6,57 @@ import ( const Version = "0.4" +type DataType byte + +// for out use const ( - NoneType byte = 0 - KVType byte = 1 - HashType byte = 2 - HSizeType byte = 3 - ListType byte = 4 - LMetaType byte = 5 - ZSetType byte = 6 - ZSizeType byte = 7 - ZScoreType byte = 8 - BitType byte = 9 - BitMetaType byte = 10 - SetType byte = 11 - SSizeType byte = 12 + KV DataType = iota + LIST + HASH + SET + ZSET +) + +func (d DataType) String() string { + switch d { + case KV: + return KVName + case LIST: + return ListName + case HASH: + return HashName + case SET: + return SetName + case ZSET: + return ZSetName + default: + return "unknown" + } +} + +const ( + KVName = "KV" + ListName = "LIST" + HashName = "HASH" + SetName = "SET" + ZSetName = "ZSET" +) + +// for backend store +const ( + NoneType byte = 0 + KVType byte = 1 + HashType byte = 2 + HSizeType byte = 3 + ListType byte = 4 + LMetaType byte = 5 + ZSetType byte = 6 + ZSizeType byte = 7 + ZScoreType byte = 8 + // BitType byte = 9 + // BitMetaType byte = 10 + SetType byte = 11 + SSizeType byte = 12 maxDataType byte = 100 @@ -36,16 +73,16 @@ const ( var ( TypeName = map[byte]string{ - KVType: "kv", - HashType: "hash", - HSizeType: "hsize", - ListType: "list", - LMetaType: "lmeta", - ZSetType: "zset", - ZSizeType: "zsize", - ZScoreType: "zscore", - BitType: "bit", - BitMetaType: "bitmeta", + KVType: "kv", + HashType: "hash", + HSizeType: "hsize", + ListType: "list", + LMetaType: "lmeta", + ZSetType: "zset", + ZSizeType: "zsize", + ZScoreType: "zscore", + // BitType: "bit", + // BitMetaType: "bitmeta", SetType: "set", SSizeType: "ssize", ExpTimeType: "exptime", @@ -67,9 +104,6 @@ var ( ) const ( - //we don't support too many databases - MaxDBNumber uint8 = 16 - //max key size MaxKeySize int = 1024 diff --git a/ledis/event.go 
b/ledis/event.go index 2a3b54a..b9a4833 100644 --- a/ledis/event.go +++ b/ledis/event.go @@ -81,20 +81,20 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { buf = append(buf, ' ') buf = strconv.AppendInt(buf, score, 10) } - case BitType: - if key, seq, err := db.bDecodeBinKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendUint(buf, uint64(seq), 10) - } - case BitMetaType: - if key, err := db.bDecodeMetaKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } + // case BitType: + // if key, seq, err := db.bDecodeBinKey(k); err != nil { + // return nil, err + // } else { + // buf = strconv.AppendQuote(buf, hack.String(key)) + // buf = append(buf, ' ') + // buf = strconv.AppendUint(buf, uint64(seq), 10) + // } + // case BitMetaType: + // if key, err := db.bDecodeMetaKey(k); err != nil { + // return nil, err + // } else { + // buf = strconv.AppendQuote(buf, hack.String(key)) + // } case SetType: if key, member, err := db.sDecodeSetKey(k); err != nil { return nil, err diff --git a/ledis/info.go b/ledis/info.go deleted file mode 100644 index df3ee72..0000000 --- a/ledis/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package ledis - -import () - -// todo, add info - -// type Keyspace struct { -// Kvs int `json:"kvs"` -// KvExpires int `json:"kv_expires"` - -// Lists int `json:"lists"` -// ListExpires int `json:"list_expires"` - -// Bitmaps int `json:"bitmaps"` -// BitmapExpires int `json:"bitmap_expires"` - -// ZSets int `json:"zsets"` -// ZSetExpires int `json:"zset_expires"` - -// Hashes int `json:"hashes"` -// HashExpires int `json:"hahsh_expires"` -// } - -// type Info struct { -// KeySpaces [MaxDBNumber]Keyspace -// } diff --git a/ledis/ledis.go b/ledis/ledis.go index c2d7f36..bda8f5a 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -18,7 +18,7 @@ type Ledis struct { cfg *config.Config ldb *store.DB - dbs 
[MaxDBNumber]*DB + dbs []*DB quit chan struct{} wg sync.WaitGroup @@ -35,7 +35,7 @@ type Ledis struct { lock io.Closer - tcs [MaxDBNumber]*ttlChecker + tcs []*ttlChecker } func Open(cfg *config.Config) (*Ledis, error) { @@ -43,6 +43,10 @@ func Open(cfg *config.Config) (*Ledis, error) { cfg.DataDir = config.DefaultDataDir } + if cfg.Databases == 0 { + cfg.Databases = 16 + } + os.MkdirAll(cfg.DataDir, 0755) var err error @@ -78,7 +82,8 @@ func Open(cfg *config.Config) (*Ledis, error) { l.r = nil } - for i := uint8(0); i < MaxDBNumber; i++ { + l.dbs = make([]*DB, cfg.Databases) + for i := uint8(0); i < cfg.Databases; i++ { l.dbs[i] = l.newDB(i) } @@ -105,7 +110,7 @@ func (l *Ledis) Close() { } func (l *Ledis) Select(index int) (*DB, error) { - if index < 0 || index >= int(MaxDBNumber) { + if index < 0 || index >= len(l.dbs) { return nil, fmt.Errorf("invalid db index %d", index) } @@ -167,6 +172,7 @@ func (l *Ledis) IsReadOnly() bool { } func (l *Ledis) checkTTL() { + l.tcs = make([]*ttlChecker, len(l.dbs)) for i, db := range l.dbs { c := newTTLChecker(db) @@ -174,7 +180,7 @@ func (l *Ledis) checkTTL() { c.register(ListType, db.listBatch, db.lDelete) c.register(HashType, db.hashBatch, db.hDelete) c.register(ZSetType, db.zsetBatch, db.zDelete) - c.register(BitType, db.binBatch, db.bDelete) + // c.register(BitType, db.binBatch, db.bDelete) c.register(SetType, db.setBatch, db.sDelete) l.tcs[i] = c diff --git a/ledis/ledis_db.go b/ledis/ledis_db.go index ebde98e..2c755e1 100644 --- a/ledis/ledis_db.go +++ b/ledis/ledis_db.go @@ -36,8 +36,8 @@ type DB struct { listBatch *batch hashBatch *batch zsetBatch *batch - binBatch *batch - setBatch *batch + // binBatch *batch + setBatch *batch status uint8 @@ -60,7 +60,7 @@ func (l *Ledis) newDB(index uint8) *DB { d.listBatch = d.newBatch() d.hashBatch = d.newBatch() d.zsetBatch = d.newBatch() - d.binBatch = d.newBatch() + // d.binBatch = d.newBatch() d.setBatch = d.newBatch() d.lbkeys = newLBlockKeys() @@ -86,7 +86,6 @@ func (db 
*DB) FlushAll() (drop int64, err error) { db.lFlush, db.hFlush, db.zFlush, - db.bFlush, db.sFlush} for _, flush := range all { @@ -117,9 +116,9 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { case ZSetType: deleteFunc = db.zDelete metaDataType = ZSizeType - case BitType: - deleteFunc = db.bDelete - metaDataType = BitMetaType + // case BitType: + // deleteFunc = db.bDelete + // metaDataType = BitMetaType case SetType: deleteFunc = db.sDelete metaDataType = SSizeType @@ -128,7 +127,7 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { } var keys [][]byte - keys, err = db.scan(metaDataType, nil, 1024, false, "") + keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) for len(keys) != 0 || err != nil { for _, key := range keys { deleteFunc(t, key) @@ -141,7 +140,7 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { } else { drop += int64(len(keys)) } - keys, err = db.scan(metaDataType, nil, 1024, false, "") + keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) } return } diff --git a/ledis/multi.go b/ledis/multi.go index 29abe34..db695c3 100644 --- a/ledis/multi.go +++ b/ledis/multi.go @@ -44,7 +44,7 @@ func (db *DB) Multi() (*Multi, error) { m.DB.listBatch = m.newBatch() m.DB.hashBatch = m.newBatch() m.DB.zsetBatch = m.newBatch() - m.DB.binBatch = m.newBatch() + // m.DB.binBatch = m.newBatch() m.DB.setBatch = m.newBatch() m.DB.lbkeys = db.lbkeys @@ -66,7 +66,7 @@ func (m *Multi) Close() error { } func (m *Multi) Select(index int) error { - if index < 0 || index >= int(MaxDBNumber) { + if index < 0 || index >= int(m.l.cfg.Databases) { return fmt.Errorf("invalid db index %d", index) } diff --git a/ledis/scan.go b/ledis/scan.go index 9e8e235..466f1ec 100644 --- a/ledis/scan.go +++ b/ledis/scan.go @@ -9,19 +9,48 @@ import ( var errDataType = errors.New("error data type") var errMetaKey = errors.New("error meta key") -func (db *DB) scan(dataType byte, key 
[]byte, count int, inclusive bool, match string) ([][]byte, error) { - return db.scanGeneric(dataType, key, count, inclusive, match, false) +//fif inclusive is true, scan range [cursor, inf) else (cursor, inf) +func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + storeDataType, err := getDataStoreType(dataType) + if err != nil { + return nil, err + } + + return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false) } -func (db *DB) revscan(dataType byte, key []byte, count int, inclusive bool, match string) ([][]byte, error) { - return db.scanGeneric(dataType, key, count, inclusive, match, true) +//if inclusive is true, revscan range (-inf, cursor] else (inf, cursor) +func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + storeDataType, err := getDataStoreType(dataType) + if err != nil { + return nil, err + } + + return db.scanGeneric(storeDataType, cursor, count, inclusive, match, true) } -func (db *DB) scanGeneric(dataType byte, key []byte, count int, - inclusive bool, match string, reverse bool) ([][]byte, error) { - var minKey, maxKey []byte +func getDataStoreType(dataType DataType) (byte, error) { + var storeDataType byte + switch dataType { + case KV: + storeDataType = KVType + case LIST: + storeDataType = LMetaType + case HASH: + storeDataType = HSizeType + case SET: + storeDataType = SSizeType + case ZSET: + storeDataType = ZSizeType + default: + return 0, errDataType + } + return storeDataType, nil +} + +func buildMatchRegexp(match string) (*regexp.Regexp, error) { var err error - var r *regexp.Regexp + var r *regexp.Regexp = nil if len(match) > 0 { if r, err = regexp.Compile(match); err != nil { @@ -29,13 +58,24 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int, } } + return r, nil +} + +func (db *DB) scanGeneric(storeDataType byte, key []byte, count int, + inclusive bool, match string, reverse bool) 
([][]byte, error) { + var minKey, maxKey []byte + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + tp := store.RangeOpen if !reverse { - if minKey, err = db.encodeScanMinKey(dataType, key); err != nil { + if minKey, err = db.encodeScanMinKey(storeDataType, key); err != nil { return nil, err } - if maxKey, err = db.encodeScanMaxKey(dataType, nil); err != nil { + if maxKey, err = db.encodeScanMaxKey(storeDataType, nil); err != nil { return nil, err } @@ -43,10 +83,10 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int, tp = store.RangeROpen } } else { - if minKey, err = db.encodeScanMinKey(dataType, nil); err != nil { + if minKey, err = db.encodeScanMinKey(storeDataType, nil); err != nil { return nil, err } - if maxKey, err = db.encodeScanMaxKey(dataType, key); err != nil { + if maxKey, err = db.encodeScanMaxKey(storeDataType, key); err != nil { return nil, err } @@ -69,7 +109,7 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int, v := make([][]byte, 0, count) for i := 0; it.Valid() && i < count; it.Next() { - if k, err := db.decodeScanKey(dataType, it.Key()); err != nil { + if k, err := db.decodeScanKey(storeDataType, it.Key()); err != nil { continue } else if r != nil && !r.Match(k) { continue @@ -82,36 +122,36 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int, return v, nil } -func (db *DB) encodeScanMinKey(dataType byte, key []byte) ([]byte, error) { +func (db *DB) encodeScanMinKey(storeDataType byte, key []byte) ([]byte, error) { if len(key) == 0 { - return db.encodeScanKey(dataType, nil) + return db.encodeScanKey(storeDataType, nil) } else { if err := checkKeySize(key); err != nil { return nil, err } - return db.encodeScanKey(dataType, key) + return db.encodeScanKey(storeDataType, key) } } -func (db *DB) encodeScanMaxKey(dataType byte, key []byte) ([]byte, error) { +func (db *DB) encodeScanMaxKey(storeDataType byte, key []byte) ([]byte, error) { if len(key) > 0 { if err := checkKeySize(key); 
err != nil { return nil, err } - return db.encodeScanKey(dataType, key) + return db.encodeScanKey(storeDataType, key) } - k, err := db.encodeScanKey(dataType, nil) + k, err := db.encodeScanKey(storeDataType, nil) if err != nil { return nil, err } - k[len(k)-1] = dataType + 1 + k[len(k)-1] = storeDataType + 1 return k, nil } -func (db *DB) encodeScanKey(dataType byte, key []byte) ([]byte, error) { - switch dataType { +func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) { + switch storeDataType { case KVType: return db.encodeKVKey(key), nil case LMetaType: @@ -120,17 +160,137 @@ func (db *DB) encodeScanKey(dataType byte, key []byte) ([]byte, error) { return db.hEncodeSizeKey(key), nil case ZSizeType: return db.zEncodeSizeKey(key), nil - case BitMetaType: - return db.bEncodeMetaKey(key), nil case SSizeType: return db.sEncodeSizeKey(key), nil + // case BitMetaType: + // return db.bEncodeMetaKey(key), nil default: return nil, errDataType } } -func (db *DB) decodeScanKey(dataType byte, ek []byte) ([]byte, error) { - if len(ek) < 2 || ek[0] != db.index || ek[1] != dataType { +func (db *DB) decodeScanKey(storeDataType byte, ek []byte) ([]byte, error) { + if len(ek) < 2 || ek[0] != db.index || ek[1] != storeDataType { return nil, errMetaKey } return ek[2:], nil } + +// for specail data scan + +func (db *DB) buildDataScanIterator(start []byte, stop []byte, inclusive bool) *store.RangeLimitIterator { + tp := store.RangeROpen + + if !inclusive { + tp = store.RangeOpen + } + it := db.bucket.RangeIterator(start, stop, tp) + return it + +} + +func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.hEncodeHashKey(key, cursor) + stop := db.hEncodeStopKey(key) + + v := make([]FVPair, 0, 16) + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + it := db.buildDataScanIterator(start, stop, inclusive) + 
defer it.Close() + + for i := 0; it.Valid() && i < count; it.Next() { + _, f, err := db.hDecodeHashKey(it.Key()) + if err != nil { + return nil, err + } else if r != nil && !r.Match(f) { + continue + } + + v = append(v, FVPair{Field: f, Value: it.Value()}) + + i++ + } + + return v, nil +} + +func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.sEncodeSetKey(key, cursor) + stop := db.sEncodeStopKey(key) + + v := make([][]byte, 0, 16) + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + it := db.buildDataScanIterator(start, stop, inclusive) + defer it.Close() + + for i := 0; it.Valid() && i < count; it.Next() { + _, m, err := db.sDecodeSetKey(it.Key()) + if err != nil { + return nil, err + } else if r != nil && !r.Match(m) { + continue + } + + v = append(v, m) + + i++ + } + + return v, nil +} + +func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.zEncodeSetKey(key, cursor) + stop := db.zEncodeStopSetKey(key) + + v := make([]ScorePair, 0, 16) + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + it := db.buildDataScanIterator(start, stop, inclusive) + defer it.Close() + + for i := 0; it.Valid() && i < count; it.Next() { + _, m, err := db.zDecodeSetKey(it.Key()) + if err != nil { + return nil, err + } else if r != nil && !r.Match(m) { + continue + } + + score, err := Int64(it.Value(), nil) + if err != nil { + return nil, err + } + + v = append(v, ScorePair{Score: score, Member: m}) + + i++ + } + + return v, nil +} diff --git a/ledis/scan_test.go b/ledis/scan_test.go index b2a0970..9505964 100644 --- a/ledis/scan_test.go +++ b/ledis/scan_test.go @@ -21,13 +21,13 @@ func TestDBScan(t *testing.T) { db.FlushAll() - if v, err := db.Scan(nil, 10, 
true, ""); err != nil { + if v, err := db.Scan(KV, nil, 10, true, ""); err != nil { t.Fatal(err) } else if len(v) != 0 { t.Fatal(len(v)) } - if v, err := db.RevScan(nil, 10, true, ""); err != nil { + if v, err := db.RevScan(KV, nil, 10, true, ""); err != nil { t.Fatal(err) } else if len(v) != 0 { t.Fatal(len(v)) @@ -37,73 +37,73 @@ func TestDBScan(t *testing.T) { db.Set([]byte("b"), []byte{}) db.Set([]byte("c"), []byte{}) - if v, err := db.Scan(nil, 1, true, ""); err != nil { + if v, err := db.Scan(KV, nil, 1, true, ""); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "a") } - if v, err := db.Scan([]byte("a"), 2, false, ""); err != nil { + if v, err := db.Scan(KV, []byte("a"), 2, false, ""); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "b", "c") } - if v, err := db.Scan(nil, 3, true, ""); err != nil { + if v, err := db.Scan(KV, nil, 3, true, ""); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "a", "b", "c") } - if v, err := db.Scan(nil, 3, true, "b"); err != nil { + if v, err := db.Scan(KV, nil, 3, true, "b"); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "b") } - if v, err := db.Scan(nil, 3, true, "."); err != nil { + if v, err := db.Scan(KV, nil, 3, true, "."); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "a", "b", "c") } - if v, err := db.Scan(nil, 3, true, "a+"); err != nil { + if v, err := db.Scan(KV, nil, 3, true, "a+"); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "a") } - if v, err := db.RevScan(nil, 1, true, ""); err != nil { + if v, err := db.RevScan(KV, nil, 1, true, ""); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "c") } - if v, err := db.RevScan([]byte("c"), 2, false, ""); err != nil { + if v, err := db.RevScan(KV, []byte("c"), 2, false, ""); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "b", "a") } - if v, err := db.RevScan(nil, 3, true, ""); err != nil { + if v, err := db.RevScan(KV, nil, 3, true, ""); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "c", "b", "a") } - if 
v, err := db.RevScan(nil, 3, true, "b"); err != nil { + if v, err := db.RevScan(KV, nil, 3, true, "b"); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "b") } - if v, err := db.RevScan(nil, 3, true, "."); err != nil { + if v, err := db.RevScan(KV, nil, 3, true, "."); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "c", "b", "a") } - if v, err := db.RevScan(nil, 3, true, "c+"); err != nil { + if v, err := db.RevScan(KV, nil, 3, true, "c+"); err != nil { t.Fatal(err) } else { checkTestScan(t, v, "c") @@ -111,7 +111,7 @@ func TestDBScan(t *testing.T) { } -func TestDBHScan(t *testing.T) { +func TestDBHKeyScan(t *testing.T) { db := getTestDB() db.hFlush() @@ -125,7 +125,7 @@ func TestDBHScan(t *testing.T) { k3 := []byte("k3") db.HSet(k3, []byte("3"), []byte{}) - if v, err := db.HScan(nil, 1, true, ""); err != nil { + if v, err := db.Scan(HASH, nil, 1, true, ""); err != nil { t.Fatal(err) } else if len(v) != 1 { t.Fatal("invalid length ", len(v)) @@ -133,7 +133,7 @@ func TestDBHScan(t *testing.T) { t.Fatal("invalid value ", string(v[0])) } - if v, err := db.HScan(k1, 2, true, ""); err != nil { + if v, err := db.Scan(HASH, k1, 2, true, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -143,7 +143,7 @@ func TestDBHScan(t *testing.T) { t.Fatal("invalid value ", string(v[1])) } - if v, err := db.HScan(k1, 2, false, ""); err != nil { + if v, err := db.Scan(HASH, k1, 2, false, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -155,7 +155,7 @@ func TestDBHScan(t *testing.T) { } -func TestDBZScan(t *testing.T) { +func TestDBZKeyScan(t *testing.T) { db := getTestDB() db.zFlush() @@ -169,7 +169,7 @@ func TestDBZScan(t *testing.T) { k3 := []byte("k3") db.ZAdd(k3, ScorePair{3, []byte("m")}) - if v, err := db.ZScan(nil, 1, true, ""); err != nil { + if v, err := db.Scan(ZSET, nil, 1, true, ""); err != nil { t.Fatal(err) } else if len(v) != 1 { t.Fatal("invalid length ", len(v)) @@ 
-177,7 +177,7 @@ func TestDBZScan(t *testing.T) { t.Fatal("invalid value ", string(v[0])) } - if v, err := db.ZScan(k1, 2, true, ""); err != nil { + if v, err := db.Scan(ZSET, k1, 2, true, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -187,7 +187,7 @@ func TestDBZScan(t *testing.T) { t.Fatal("invalid value ", string(v[1])) } - if v, err := db.ZScan(k1, 2, false, ""); err != nil { + if v, err := db.Scan(ZSET, k1, 2, false, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -199,7 +199,7 @@ func TestDBZScan(t *testing.T) { } -func TestDBLScan(t *testing.T) { +func TestDBLKeyScan(t *testing.T) { db := getTestDB() db.lFlush() @@ -219,7 +219,7 @@ func TestDBLScan(t *testing.T) { t.Fatal(err.Error()) } - if v, err := db.LScan(nil, 1, true, ""); err != nil { + if v, err := db.Scan(LIST, nil, 1, true, ""); err != nil { t.Fatal(err) } else if len(v) != 1 { t.Fatal("invalid length ", len(v)) @@ -227,7 +227,7 @@ func TestDBLScan(t *testing.T) { t.Fatal("invalid value ", string(v[0])) } - if v, err := db.LScan(k1, 2, true, ""); err != nil { + if v, err := db.Scan(LIST, k1, 2, true, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -237,7 +237,7 @@ func TestDBLScan(t *testing.T) { t.Fatal("invalid value ", string(v[1])) } - if v, err := db.LScan(k1, 2, false, ""); err != nil { + if v, err := db.Scan(LIST, k1, 2, false, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -249,60 +249,10 @@ func TestDBLScan(t *testing.T) { } -func TestDBBScan(t *testing.T) { - // db := getTestDB() - - // db.bFlush() - - // k1 := []byte("k1") - // if _, err := db.BSetBit(k1, 1, 1); err != nil { - // t.Fatal(err.Error()) - // } - - // k2 := []byte("k2") - // if _, err := db.BSetBit(k2, 1, 1); err != nil { - // t.Fatal(err.Error()) - // } - // k3 := []byte("k3") - - // if _, err := db.BSetBit(k3, 1, 0); err != nil { - // 
t.Fatal(err.Error()) - // } - - // if v, err := db.BScan(nil, 1, true, ""); err != nil { - // t.Fatal(err) - // } else if len(v) != 1 { - // t.Fatal("invalid length ", len(v)) - // } else if string(v[0]) != "k1" { - // t.Fatal("invalid value ", string(v[0])) - // } - - // if v, err := db.BScan(k1, 2, true, ""); err != nil { - // t.Fatal(err) - // } else if len(v) != 2 { - // t.Fatal("invalid length ", len(v)) - // } else if string(v[0]) != "k1" { - // t.Fatal("invalid value ", string(v[0])) - // } else if string(v[1]) != "k2" { - // t.Fatal("invalid value ", string(v[1])) - // } - - // if v, err := db.BScan(k1, 2, false, ""); err != nil { - // t.Fatal(err) - // } else if len(v) != 2 { - // t.Fatal("invalid length ", len(v)) - // } else if string(v[0]) != "k2" { - // t.Fatal("invalid value ", string(v[0])) - // } else if string(v[1]) != "k3" { - // t.Fatal("invalid value ", string(v[1])) - // } - -} - -func TestDBSScan(t *testing.T) { +func TestDBSKeyScan(t *testing.T) { db := getTestDB() - db.bFlush() + db.sFlush() k1 := []byte("k1") if _, err := db.SAdd(k1, []byte("1")); err != nil { @@ -319,7 +269,7 @@ func TestDBSScan(t *testing.T) { t.Fatal(err.Error()) } - if v, err := db.SScan(nil, 1, true, ""); err != nil { + if v, err := db.Scan(SET, nil, 1, true, ""); err != nil { t.Fatal(err) } else if len(v) != 1 { t.Fatal("invalid length ", len(v)) @@ -327,7 +277,7 @@ func TestDBSScan(t *testing.T) { t.Fatal("invalid value ", string(v[0])) } - if v, err := db.SScan(k1, 2, true, ""); err != nil { + if v, err := db.Scan(SET, k1, 2, true, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -337,7 +287,7 @@ func TestDBSScan(t *testing.T) { t.Fatal("invalid value ", string(v[1])) } - if v, err := db.SScan(k1, 2, false, ""); err != nil { + if v, err := db.Scan(SET, k1, 2, false, ""); err != nil { t.Fatal(err) } else if len(v) != 2 { t.Fatal("invalid length ", len(v)) @@ -346,5 +296,77 @@ func TestDBSScan(t *testing.T) { } else if 
string(v[1]) != "k3" { t.Fatal("invalid value ", string(v[1])) } - +} + +func TestDBHScan(t *testing.T) { + db := getTestDB() + + key := []byte("scan_h_key") + value := []byte("hello world") + db.HSet(key, []byte("1"), value) + db.HSet(key, []byte("222"), value) + db.HSet(key, []byte("19"), value) + db.HSet(key, []byte("1234"), value) + + v, err := db.HScan(key, nil, 100, true, "") + if err != nil { + t.Fatal(err) + } else if len(v) != 4 { + t.Fatal("invalid count", len(v)) + } + + v, err = db.HScan(key, []byte("19"), 1, false, "") + if err != nil { + t.Fatal(err) + } else if len(v) != 1 { + t.Fatal("invalid count", len(v)) + } else if string(v[0].Field) != "222" { + t.Fatal(string(v[0].Field)) + } +} + +func TestDBSScan(t *testing.T) { + db := getTestDB() + key := []byte("scan_s_key") + + db.SAdd(key, []byte("1"), []byte("222"), []byte("19"), []byte("1234")) + + v, err := db.SScan(key, nil, 100, true, "") + if err != nil { + t.Fatal(err) + } else if len(v) != 4 { + t.Fatal("invalid count", len(v)) + } + + v, err = db.SScan(key, []byte("19"), 1, false, "") + if err != nil { + t.Fatal(err) + } else if len(v) != 1 { + t.Fatal("invalid count", len(v)) + } else if string(v[0]) != "222" { + t.Fatal(string(v[0])) + } +} + +func TestDBZScan(t *testing.T) { + db := getTestDB() + key := []byte("scan_z_key") + + db.ZAdd(key, ScorePair{1, []byte("1")}, ScorePair{2, []byte("222")}, ScorePair{3, []byte("19")}, ScorePair{4, []byte("1234")}) + + v, err := db.ZScan(key, nil, 100, true, "") + if err != nil { + t.Fatal(err) + } else if len(v) != 4 { + t.Fatal("invalid count", len(v)) + } + + v, err = db.ZScan(key, []byte("19"), 1, false, "") + if err != nil { + t.Fatal(err) + } else if len(v) != 1 { + t.Fatal("invalid count", len(v)) + } else if string(v[0].Member) != "222" { + t.Fatal(string(v[0].Member)) + } } diff --git a/ledis/t_bit.go b/ledis/t_bit.go index cff51d6..ebb099c 100644 --- a/ledis/t_bit.go +++ b/ledis/t_bit.go @@ -1,941 +1,931 @@ package ledis -import ( - 
"encoding/binary" - "errors" - "github.com/siddontang/go/log" - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/store" - "sort" - "time" -) - -/* - We will not maintain bitmap anymore, and will add bit operations for kv type later. - Use your own risk. -*/ - -const ( - OPand uint8 = iota + 1 - OPor - OPxor - OPnot -) - -type BitPair struct { - Pos int32 - Val uint8 -} - -type segBitInfo struct { - Seq uint32 - Off uint32 - Val uint8 -} - -type segBitInfoArray []segBitInfo - -const ( - // byte - segByteWidth uint32 = 9 - segByteSize uint32 = 1 << segByteWidth - - // bit - segBitWidth uint32 = segByteWidth + 3 - segBitSize uint32 = segByteSize << 3 - - maxByteSize uint32 = 8 << 20 - maxSegCount uint32 = maxByteSize / segByteSize - - minSeq uint32 = 0 - maxSeq uint32 = uint32((maxByteSize << 3) - 1) -) - -var fillBits = [...]uint8{1, 3, 7, 15, 31, 63, 127, 255} - -var emptySegment []byte = make([]byte, segByteSize, segByteSize) - -var fillSegment []byte = func() []byte { - data := make([]byte, segByteSize, segByteSize) - for i := uint32(0); i < segByteSize; i++ { - data[i] = 0xff - } - return data -}() - -var errBinKey = errors.New("invalid bin key") -var errOffset = errors.New("invalid offset") -var errDuplicatePos = errors.New("duplicate bit pos") - -func getBit(sz []byte, offset uint32) uint8 { - index := offset >> 3 - if index >= uint32(len(sz)) { - return 0 // error("overflow") - } - - offset -= index << 3 - return sz[index] >> offset & 1 -} - -func setBit(sz []byte, offset uint32, val uint8) bool { - if val != 1 && val != 0 { - return false // error("invalid val") - } - - index := offset >> 3 - if index >= uint32(len(sz)) { - return false // error("overflow") - } - - offset -= index << 3 - if sz[index]>>offset&1 != val { - sz[index] ^= (1 << offset) - } - return true -} - -func (datas segBitInfoArray) Len() int { - return len(datas) -} - -func (datas segBitInfoArray) Less(i, j int) bool { - res := (datas)[i].Seq < (datas)[j].Seq - if !res && 
(datas)[i].Seq == (datas)[j].Seq { - res = (datas)[i].Off < (datas)[j].Off - } - return res -} - -func (datas segBitInfoArray) Swap(i, j int) { - datas[i], datas[j] = datas[j], datas[i] -} - -func (db *DB) bEncodeMetaKey(key []byte) []byte { - mk := make([]byte, len(key)+2) - mk[0] = db.index - mk[1] = BitMetaType - - copy(mk[2:], key) - return mk -} - -func (db *DB) bDecodeMetaKey(bkey []byte) ([]byte, error) { - if len(bkey) < 2 || bkey[0] != db.index || bkey[1] != BitMetaType { - return nil, errBinKey - } - - return bkey[2:], nil -} - -func (db *DB) bEncodeBinKey(key []byte, seq uint32) []byte { - bk := make([]byte, len(key)+8) - - pos := 0 - bk[pos] = db.index - pos++ - bk[pos] = BitType - pos++ - - binary.BigEndian.PutUint16(bk[pos:], uint16(len(key))) - pos += 2 - - copy(bk[pos:], key) - pos += len(key) - - binary.BigEndian.PutUint32(bk[pos:], seq) - - return bk -} - -func (db *DB) bDecodeBinKey(bkey []byte) (key []byte, seq uint32, err error) { - if len(bkey) < 8 || bkey[0] != db.index { - err = errBinKey - return - } - - keyLen := binary.BigEndian.Uint16(bkey[2:4]) - if int(keyLen+8) != len(bkey) { - err = errBinKey - return - } - - key = bkey[4 : 4+keyLen] - seq = uint32(binary.BigEndian.Uint32(bkey[4+keyLen:])) - return -} - -func (db *DB) bCapByteSize(seq uint32, off uint32) uint32 { - var offByteSize uint32 = (off >> 3) + 1 - if offByteSize > segByteSize { - offByteSize = segByteSize - } - - return seq<= 0 { - offset += int32((uint32(tailSeq)<> segBitWidth - off &= (segBitSize - 1) - return -} - -func (db *DB) bGetMeta(key []byte) (tailSeq int32, tailOff int32, err error) { - var v []byte - - mk := db.bEncodeMetaKey(key) - v, err = db.bucket.Get(mk) - if err != nil { - return - } - - if v != nil { - tailSeq = int32(binary.LittleEndian.Uint32(v[0:4])) - tailOff = int32(binary.LittleEndian.Uint32(v[4:8])) - } else { - tailSeq = -1 - tailOff = -1 - } - return -} - -func (db *DB) bSetMeta(t *batch, key []byte, tailSeq uint32, tailOff uint32) { - ek := 
db.bEncodeMetaKey(key) - - buf := make([]byte, 8) - binary.LittleEndian.PutUint32(buf[0:4], tailSeq) - binary.LittleEndian.PutUint32(buf[4:8], tailOff) - - t.Put(ek, buf) - return -} - -func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq uint32, tailOff uint32, err error) { - var tseq, toff int32 - var update bool = false - - if tseq, toff, err = db.bGetMeta(key); err != nil { - return - } else if tseq < 0 { - update = true - } else { - tailSeq = uint32(num.MaxInt32(tseq, 0)) - tailOff = uint32(num.MaxInt32(toff, 0)) - update = (seq > tailSeq || (seq == tailSeq && off > tailOff)) - } - - if update { - db.bSetMeta(t, key, seq, off) - tailSeq = seq - tailOff = off - } - return -} - -func (db *DB) bDelete(t *batch, key []byte) (drop int64) { - mk := db.bEncodeMetaKey(key) - t.Delete(mk) - - minKey := db.bEncodeBinKey(key, minSeq) - maxKey := db.bEncodeBinKey(key, maxSeq) - it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) - for ; it.Valid(); it.Next() { - t.Delete(it.RawKey()) - drop++ - } - it.Close() - - return drop -} - -func (db *DB) bGetSegment(key []byte, seq uint32) ([]byte, []byte, error) { - bk := db.bEncodeBinKey(key, seq) - segment, err := db.bucket.Get(bk) - if err != nil { - return bk, nil, err - } - return bk, segment, nil -} - -func (db *DB) bAllocateSegment(key []byte, seq uint32) ([]byte, []byte, error) { - bk, segment, err := db.bGetSegment(key, seq) - if err == nil && segment == nil { - segment = make([]byte, segByteSize, segByteSize) - } - return bk, segment, err -} - -func (db *DB) bIterator(key []byte) *store.RangeLimitIterator { - sk := db.bEncodeBinKey(key, minSeq) - ek := db.bEncodeBinKey(key, maxSeq) - return db.bucket.RangeIterator(sk, ek, store.RangeClose) -} - -func (db *DB) bSegAnd(a []byte, b []byte, res *[]byte) { - if a == nil || b == nil { - *res = nil - return - } - - data := *res - if data == nil { - data = make([]byte, segByteSize, segByteSize) - *res = data - } - - for i := uint32(0); i < 
segByteSize; i++ { - data[i] = a[i] & b[i] - } - return -} - -func (db *DB) bSegOr(a []byte, b []byte, res *[]byte) { - if a == nil || b == nil { - if a == nil && b == nil { - *res = nil - } else if a == nil { - *res = b - } else { - *res = a - } - return - } - - data := *res - if data == nil { - data = make([]byte, segByteSize, segByteSize) - *res = data - } - - for i := uint32(0); i < segByteSize; i++ { - data[i] = a[i] | b[i] - } - return -} - -func (db *DB) bSegXor(a []byte, b []byte, res *[]byte) { - if a == nil && b == nil { - *res = fillSegment - return - } - - if a == nil { - a = emptySegment - } - - if b == nil { - b = emptySegment - } - - data := *res - if data == nil { - data = make([]byte, segByteSize, segByteSize) - *res = data - } - - for i := uint32(0); i < segByteSize; i++ { - data[i] = a[i] ^ b[i] - } - - return -} - -func (db *DB) bExpireAt(key []byte, when int64) (int64, error) { - t := db.binBatch - t.Lock() - defer t.Unlock() - - if seq, _, err := db.bGetMeta(key); err != nil || seq < 0 { - return 0, err - } else { - db.expireAt(t, BitType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - } - return 1, nil -} - -func (db *DB) bCountByte(val byte, soff uint32, eoff uint32) int32 { - if soff > eoff { - soff, eoff = eoff, soff - } - - mask := uint8(0) - if soff > 0 { - mask |= fillBits[soff-1] - } - if eoff < 7 { - mask |= (fillBits[7] ^ fillBits[eoff]) - } - mask = fillBits[7] ^ mask - - return bitsInByte[val&mask] -} - -func (db *DB) bCountSeg(key []byte, seq uint32, soff uint32, eoff uint32) (cnt int32, err error) { - if soff >= segBitSize || soff < 0 || - eoff >= segBitSize || eoff < 0 { - return - } - - var segment []byte - if _, segment, err = db.bGetSegment(key, seq); err != nil { - return - } - - if segment == nil { - return - } - - if soff > eoff { - soff, eoff = eoff, soff - } - - headIdx := int(soff >> 3) - endIdx := int(eoff >> 3) - sByteOff := soff - ((soff >> 3) << 3) - eByteOff := eoff - ((eoff >> 3) << 3) - - 
if headIdx == endIdx { - cnt = db.bCountByte(segment[headIdx], sByteOff, eByteOff) - } else { - cnt = db.bCountByte(segment[headIdx], sByteOff, 7) + - db.bCountByte(segment[endIdx], 0, eByteOff) - } - - // sum up following bytes - for idx, end := headIdx+1, endIdx-1; idx <= end; idx += 1 { - cnt += bitsInByte[segment[idx]] - if idx == end { - break - } - } - - return -} - -func (db *DB) BGet(key []byte) (data []byte, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - var ts, to int32 - if ts, to, err = db.bGetMeta(key); err != nil || ts < 0 { - return - } - - var tailSeq, tailOff = uint32(ts), uint32(to) - var capByteSize uint32 = db.bCapByteSize(tailSeq, tailOff) - data = make([]byte, capByteSize, capByteSize) - - minKey := db.bEncodeBinKey(key, minSeq) - maxKey := db.bEncodeBinKey(key, tailSeq) - it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) - - var seq, s, e uint32 - for ; it.Valid(); it.Next() { - if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { - data = nil - break - } - - s = seq << segByteWidth - e = num.MinUint32(s+segByteSize, capByteSize) - copy(data[s:e], it.RawValue()) - } - it.Close() - - return -} - -func (db *DB) BDelete(key []byte) (drop int64, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - t := db.binBatch - t.Lock() - defer t.Unlock() - - drop = db.bDelete(t, key) - db.rmExpire(t, BitType, key) - - err = t.Commit() - return -} - -func (db *DB) BSetBit(key []byte, offset int32, val uint8) (ori uint8, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - // todo : check offset - var seq, off uint32 - if seq, off, err = db.bParseOffset(key, offset); err != nil { - return 0, err - } - 
- var bk, segment []byte - if bk, segment, err = db.bAllocateSegment(key, seq); err != nil { - return 0, err - } - - if segment != nil { - ori = getBit(segment, off) - if setBit(segment, off, val) { - t := db.binBatch - t.Lock() - defer t.Unlock() - - t.Put(bk, segment) - if _, _, e := db.bUpdateMeta(t, key, seq, off); e != nil { - err = e - return - } - - err = t.Commit() - } - } - - return -} - -func (db *DB) BMSetBit(key []byte, args ...BitPair) (place int64, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - // (ps : so as to aviod wasting memory copy while calling db.Get() and batch.Put(), - // here we sequence the params by pos, so that we can merge the execution of - // diff pos setting which targets on the same segment respectively. ) - - // #1 : sequence request data - var argCnt = len(args) - var bitInfos segBitInfoArray = make(segBitInfoArray, argCnt) - var seq, off uint32 - - for i, info := range args { - if seq, off, err = db.bParseOffset(key, info.Pos); err != nil { - return - } - - bitInfos[i].Seq = seq - bitInfos[i].Off = off - bitInfos[i].Val = info.Val - } - - sort.Sort(bitInfos) - - for i := 1; i < argCnt; i++ { - if bitInfos[i].Seq == bitInfos[i-1].Seq && bitInfos[i].Off == bitInfos[i-1].Off { - return 0, errDuplicatePos - } - } - - // #2 : execute bit set in order - t := db.binBatch - t.Lock() - defer t.Unlock() - - var curBinKey, curSeg []byte - var curSeq, maxSeq, maxOff uint32 - - for _, info := range bitInfos { - if curSeg != nil && info.Seq != curSeq { - t.Put(curBinKey, curSeg) - curSeg = nil - } - - if curSeg == nil { - curSeq = info.Seq - if curBinKey, curSeg, err = db.bAllocateSegment(key, info.Seq); err != nil { - return - } - - if curSeg == nil { - continue - } - } - - if setBit(curSeg, info.Off, info.Val) { - maxSeq = info.Seq - maxOff = info.Off - place++ - } - } - - if curSeg != nil { - t.Put(curBinKey, curSeg) - } - - 
// finally, update meta - if place > 0 { - if _, _, err = db.bUpdateMeta(t, key, maxSeq, maxOff); err != nil { - return - } - - err = t.Commit() - } - - return -} - -func (db *DB) BGetBit(key []byte, offset int32) (uint8, error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if seq, off, err := db.bParseOffset(key, offset); err != nil { - return 0, err - } else { - _, segment, err := db.bGetSegment(key, seq) - if err != nil { - return 0, err - } - - if segment == nil { - return 0, nil - } else { - return getBit(segment, off), nil - } - } -} - -// func (db *DB) BGetRange(key []byte, start int32, end int32) ([]byte, error) { -// section := make([]byte) +// import ( +// "encoding/binary" +// "errors" +// "github.com/siddontang/go/log" +// "github.com/siddontang/go/num" +// "github.com/siddontang/ledisdb/store" +// "sort" +// "time" +// ) + +// /* +// We will not maintain bitmap anymore, and will add bit operations for kv type later. +// Use your own risk. 
+// */ + +// const ( +// OPand uint8 = iota + 1 +// OPor +// OPxor +// OPnot +// ) + +// type BitPair struct { +// Pos int32 +// Val uint8 +// } + +// type segBitInfo struct { +// Seq uint32 +// Off uint32 +// Val uint8 +// } + +// type segBitInfoArray []segBitInfo + +// const ( +// // byte +// segByteWidth uint32 = 9 +// segByteSize uint32 = 1 << segByteWidth + +// // bit +// segBitWidth uint32 = segByteWidth + 3 +// segBitSize uint32 = segByteSize << 3 + +// maxByteSize uint32 = 8 << 20 +// maxSegCount uint32 = maxByteSize / segByteSize + +// minSeq uint32 = 0 +// maxSeq uint32 = uint32((maxByteSize << 3) - 1) +// ) + +// var fillBits = [...]uint8{1, 3, 7, 15, 31, 63, 127, 255} + +// var emptySegment []byte = make([]byte, segByteSize, segByteSize) + +// var fillSegment []byte = func() []byte { +// data := make([]byte, segByteSize, segByteSize) +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = 0xff +// } +// return data +// }() + +// var errBinKey = errors.New("invalid bin key") +// var errOffset = errors.New("invalid offset") +// var errDuplicatePos = errors.New("duplicate bit pos") + +// func getBit(sz []byte, offset uint32) uint8 { +// index := offset >> 3 +// if index >= uint32(len(sz)) { +// return 0 // error("overflow") +// } + +// offset -= index << 3 +// return sz[index] >> offset & 1 +// } + +// func setBit(sz []byte, offset uint32, val uint8) bool { +// if val != 1 && val != 0 { +// return false // error("invalid val") +// } + +// index := offset >> 3 +// if index >= uint32(len(sz)) { +// return false // error("overflow") +// } + +// offset -= index << 3 +// if sz[index]>>offset&1 != val { +// sz[index] ^= (1 << offset) +// } +// return true +// } + +// func (datas segBitInfoArray) Len() int { +// return len(datas) +// } + +// func (datas segBitInfoArray) Less(i, j int) bool { +// res := (datas)[i].Seq < (datas)[j].Seq +// if !res && (datas)[i].Seq == (datas)[j].Seq { +// res = (datas)[i].Off < (datas)[j].Off +// } +// return res +// } + +// 
func (datas segBitInfoArray) Swap(i, j int) { +// datas[i], datas[j] = datas[j], datas[i] +// } + +// func (db *DB) bEncodeMetaKey(key []byte) []byte { +// mk := make([]byte, len(key)+2) +// mk[0] = db.index +// mk[1] = BitMetaType + +// copy(mk[2:], key) +// return mk +// } + +// func (db *DB) bDecodeMetaKey(bkey []byte) ([]byte, error) { +// if len(bkey) < 2 || bkey[0] != db.index || bkey[1] != BitMetaType { +// return nil, errBinKey +// } + +// return bkey[2:], nil +// } + +// func (db *DB) bEncodeBinKey(key []byte, seq uint32) []byte { +// bk := make([]byte, len(key)+8) + +// pos := 0 +// bk[pos] = db.index +// pos++ +// bk[pos] = BitType +// pos++ + +// binary.BigEndian.PutUint16(bk[pos:], uint16(len(key))) +// pos += 2 + +// copy(bk[pos:], key) +// pos += len(key) + +// binary.BigEndian.PutUint32(bk[pos:], seq) + +// return bk +// } + +// func (db *DB) bDecodeBinKey(bkey []byte) (key []byte, seq uint32, err error) { +// if len(bkey) < 8 || bkey[0] != db.index { +// err = errBinKey +// return +// } + +// keyLen := binary.BigEndian.Uint16(bkey[2:4]) +// if int(keyLen+8) != len(bkey) { +// err = errBinKey +// return +// } + +// key = bkey[4 : 4+keyLen] +// seq = uint32(binary.BigEndian.Uint32(bkey[4+keyLen:])) +// return +// } + +// func (db *DB) bCapByteSize(seq uint32, off uint32) uint32 { +// var offByteSize uint32 = (off >> 3) + 1 +// if offByteSize > segByteSize { +// offByteSize = segByteSize +// } + +// return seq<= 0 { +// offset += int32((uint32(tailSeq)<> segBitWidth +// off &= (segBitSize - 1) +// return +// } + +// func (db *DB) bGetMeta(key []byte) (tailSeq int32, tailOff int32, err error) { +// var v []byte + +// mk := db.bEncodeMetaKey(key) +// v, err = db.bucket.Get(mk) +// if err != nil { +// return +// } + +// if v != nil { +// tailSeq = int32(binary.LittleEndian.Uint32(v[0:4])) +// tailOff = int32(binary.LittleEndian.Uint32(v[4:8])) +// } else { +// tailSeq = -1 +// tailOff = -1 +// } +// return +// } + +// func (db *DB) bSetMeta(t *batch, key 
[]byte, tailSeq uint32, tailOff uint32) { +// ek := db.bEncodeMetaKey(key) + +// buf := make([]byte, 8) +// binary.LittleEndian.PutUint32(buf[0:4], tailSeq) +// binary.LittleEndian.PutUint32(buf[4:8], tailOff) + +// t.Put(ek, buf) +// return +// } + +// func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq uint32, tailOff uint32, err error) { +// var tseq, toff int32 +// var update bool = false + +// if tseq, toff, err = db.bGetMeta(key); err != nil { +// return +// } else if tseq < 0 { +// update = true +// } else { +// tailSeq = uint32(num.MaxInt32(tseq, 0)) +// tailOff = uint32(num.MaxInt32(toff, 0)) +// update = (seq > tailSeq || (seq == tailSeq && off > tailOff)) +// } + +// if update { +// db.bSetMeta(t, key, seq, off) +// tailSeq = seq +// tailOff = off +// } +// return +// } + +// func (db *DB) bDelete(t *batch, key []byte) (drop int64) { +// mk := db.bEncodeMetaKey(key) +// t.Delete(mk) + +// minKey := db.bEncodeBinKey(key, minSeq) +// maxKey := db.bEncodeBinKey(key, maxSeq) +// it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) +// for ; it.Valid(); it.Next() { +// t.Delete(it.RawKey()) +// drop++ +// } +// it.Close() + +// return drop +// } + +// func (db *DB) bGetSegment(key []byte, seq uint32) ([]byte, []byte, error) { +// bk := db.bEncodeBinKey(key, seq) +// segment, err := db.bucket.Get(bk) +// if err != nil { +// return bk, nil, err +// } +// return bk, segment, nil +// } + +// func (db *DB) bAllocateSegment(key []byte, seq uint32) ([]byte, []byte, error) { +// bk, segment, err := db.bGetSegment(key, seq) +// if err == nil && segment == nil { +// segment = make([]byte, segByteSize, segByteSize) +// } +// return bk, segment, err +// } + +// func (db *DB) bIterator(key []byte) *store.RangeLimitIterator { +// sk := db.bEncodeBinKey(key, minSeq) +// ek := db.bEncodeBinKey(key, maxSeq) +// return db.bucket.RangeIterator(sk, ek, store.RangeClose) +// } + +// func (db *DB) bSegAnd(a []byte, b []byte, res *[]byte) { 
+// if a == nil || b == nil { +// *res = nil +// return +// } + +// data := *res +// if data == nil { +// data = make([]byte, segByteSize, segByteSize) +// *res = data +// } + +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = a[i] & b[i] +// } +// return +// } + +// func (db *DB) bSegOr(a []byte, b []byte, res *[]byte) { +// if a == nil || b == nil { +// if a == nil && b == nil { +// *res = nil +// } else if a == nil { +// *res = b +// } else { +// *res = a +// } +// return +// } + +// data := *res +// if data == nil { +// data = make([]byte, segByteSize, segByteSize) +// *res = data +// } + +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = a[i] | b[i] +// } +// return +// } + +// func (db *DB) bSegXor(a []byte, b []byte, res *[]byte) { +// if a == nil && b == nil { +// *res = fillSegment +// return +// } + +// if a == nil { +// a = emptySegment +// } + +// if b == nil { +// b = emptySegment +// } + +// data := *res +// if data == nil { +// data = make([]byte, segByteSize, segByteSize) +// *res = data +// } + +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = a[i] ^ b[i] +// } // return // } -func (db *DB) BCount(key []byte, start int32, end int32) (cnt int32, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") +// func (db *DB) bExpireAt(key []byte, when int64) (int64, error) { +// t := db.binBatch +// t.Lock() +// defer t.Unlock() - var sseq, soff uint32 - if sseq, soff, err = db.bParseOffset(key, start); err != nil { - return - } +// if seq, _, err := db.bGetMeta(key); err != nil || seq < 0 { +// return 0, err +// } else { +// db.expireAt(t, BitType, key, when) +// if err := t.Commit(); err != nil { +// return 0, err +// } +// } +// return 1, nil +// } - var eseq, eoff uint32 - if eseq, eoff, err = db.bParseOffset(key, end); err != nil { - return - } +// func (db *DB) bCountByte(val byte, soff uint32, eoff uint32) int32 { +// if soff > eoff { +// soff, eoff = eoff, soff +// } - if 
sseq > eseq || (sseq == eseq && soff > eoff) { - sseq, eseq = eseq, sseq - soff, eoff = eoff, soff - } +// mask := uint8(0) +// if soff > 0 { +// mask |= fillBits[soff-1] +// } +// if eoff < 7 { +// mask |= (fillBits[7] ^ fillBits[eoff]) +// } +// mask = fillBits[7] ^ mask - var segCnt int32 - if eseq == sseq { - if segCnt, err = db.bCountSeg(key, sseq, soff, eoff); err != nil { - return 0, err - } +// return bitsInByte[val&mask] +// } - cnt = segCnt +// func (db *DB) bCountSeg(key []byte, seq uint32, soff uint32, eoff uint32) (cnt int32, err error) { +// if soff >= segBitSize || soff < 0 || +// eoff >= segBitSize || eoff < 0 { +// return +// } - } else { - if segCnt, err = db.bCountSeg(key, sseq, soff, segBitSize-1); err != nil { - return 0, err - } else { - cnt += segCnt - } +// var segment []byte +// if _, segment, err = db.bGetSegment(key, seq); err != nil { +// return +// } - if segCnt, err = db.bCountSeg(key, eseq, 0, eoff); err != nil { - return 0, err - } else { - cnt += segCnt - } - } +// if segment == nil { +// return +// } - // middle segs - var segment []byte - skey := db.bEncodeBinKey(key, sseq) - ekey := db.bEncodeBinKey(key, eseq) +// if soff > eoff { +// soff, eoff = eoff, soff +// } - it := db.bucket.RangeIterator(skey, ekey, store.RangeOpen) - for ; it.Valid(); it.Next() { - segment = it.RawValue() - for _, bt := range segment { - cnt += bitsInByte[bt] - } - } - it.Close() +// headIdx := int(soff >> 3) +// endIdx := int(eoff >> 3) +// sByteOff := soff - ((soff >> 3) << 3) +// eByteOff := eoff - ((eoff >> 3) << 3) - return -} +// if headIdx == endIdx { +// cnt = db.bCountByte(segment[headIdx], sByteOff, eByteOff) +// } else { +// cnt = db.bCountByte(segment[headIdx], sByteOff, 7) + +// db.bCountByte(segment[endIdx], 0, eByteOff) +// } -func (db *DB) BTail(key []byte) (int32, error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") +// // sum up following bytes +// for idx, end := headIdx+1, endIdx-1; idx <= 
end; idx += 1 { +// cnt += bitsInByte[segment[idx]] +// if idx == end { +// break +// } +// } - // effective length of data, the highest bit-pos set in history - tailSeq, tailOff, err := db.bGetMeta(key) - if err != nil { - return 0, err - } +// return +// } - tail := int32(-1) - if tailSeq >= 0 { - tail = int32(uint32(tailSeq)< maxDstSeq || (seq == maxDstSeq && off > maxDstOff) { - maxDstSeq = seq - maxDstOff = off - } - } +// t := db.binBatch +// t.Lock() +// defer t.Unlock() - if (op == OPnot && validKeyNum != 1) || - (op != OPnot && validKeyNum < 2) { - return // with not enough existing source key - } +// drop = db.bDelete(t, key) +// db.rmExpire(t, BitType, key) - var srcIdx int - for srcIdx = 0; srcIdx < keyNum; srcIdx++ { - if srckeys[srcIdx] != nil { - break - } - } +// err = t.Commit() +// return +// } - // init - data - var segments = make([][]byte, maxDstSeq+1) +// func (db *DB) BSetBit(key []byte, offset int32, val uint8) (ori uint8, err error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - if op == OPnot { - // ps : - // ( ~num == num ^ 0x11111111 ) - // we init the result segments with all bit set, - // then we can calculate through the way of 'xor'. +// if err = checkKeySize(key); err != nil { +// return +// } - // ahead segments bin format : 1111 ... 
1111 - for i := uint32(0); i < maxDstSeq; i++ { - segments[i] = fillSegment - } +// // todo : check offset +// var seq, off uint32 +// if seq, off, err = db.bParseOffset(key, offset); err != nil { +// return 0, err +// } - // last segment bin format : 1111..1100..0000 - var tailSeg = make([]byte, segByteSize, segByteSize) - var fillByte = fillBits[7] - var tailSegLen = db.bCapByteSize(uint32(0), maxDstOff) - for i := uint32(0); i < tailSegLen-1; i++ { - tailSeg[i] = fillByte - } - tailSeg[tailSegLen-1] = fillBits[maxDstOff-(tailSegLen-1)<<3] - segments[maxDstSeq] = tailSeg +// var bk, segment []byte +// if bk, segment, err = db.bAllocateSegment(key, seq); err != nil { +// return 0, err +// } - } else { - // ps : init segments by data corresponding to the 1st valid source key - it := db.bIterator(srckeys[srcIdx]) - for ; it.Valid(); it.Next() { - if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { - // to do ... - it.Close() - return - } - segments[seq] = it.Value() - } - it.Close() - srcIdx++ - } +// if segment != nil { +// ori = getBit(segment, off) +// if setBit(segment, off, val) { +// t := db.binBatch +// t.Lock() +// defer t.Unlock() - // operation with following keys - var res []byte - for i := srcIdx; i < keyNum; i++ { - if srckeys[i] == nil { - continue - } +// t.Put(bk, segment) +// if _, _, e := db.bUpdateMeta(t, key, seq, off); e != nil { +// err = e +// return +// } - it := db.bIterator(srckeys[i]) - for idx, end := uint32(0), false; !end; it.Next() { - end = !it.Valid() - if !end { - if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { - // to do ... 
- it.Close() - return - } - } else { - seq = maxDstSeq + 1 - } +// err = t.Commit() +// } +// } - // todo : - // operation 'and' can be optimize here : - // if seq > max_segments_idx, this loop can be break, - // which can avoid cost from Key() and bDecodeBinKey() +// return +// } - for ; idx < seq; idx++ { - res = nil - exeOp(segments[idx], nil, &res) - segments[idx] = res - } +// func (db *DB) BMSetBit(key []byte, args ...BitPair) (place int64, err error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - if !end { - res = it.Value() - exeOp(segments[seq], res, &res) - segments[seq] = res - idx++ - } - } - it.Close() - } +// if err = checkKeySize(key); err != nil { +// return +// } - // clear the old data in case - db.bDelete(t, dstkey) - db.rmExpire(t, BitType, dstkey) +// // (ps : so as to aviod wasting memory copy while calling db.Get() and batch.Put(), +// // here we sequence the params by pos, so that we can merge the execution of +// // diff pos setting which targets on the same segment respectively. 
) - // set data - db.bSetMeta(t, dstkey, maxDstSeq, maxDstOff) +// // #1 : sequence request data +// var argCnt = len(args) +// var bitInfos segBitInfoArray = make(segBitInfoArray, argCnt) +// var seq, off uint32 - var bk []byte - for seq, segt := range segments { - if segt != nil { - bk = db.bEncodeBinKey(dstkey, uint32(seq)) - t.Put(bk, segt) - } - } +// for i, info := range args { +// if seq, off, err = db.bParseOffset(key, info.Pos); err != nil { +// return +// } - err = t.Commit() - if err == nil { - // blen = int32(db.bCapByteSize(maxDstOff, maxDstOff)) - blen = int32(maxDstSeq< 0 { +// if _, _, err = db.bUpdateMeta(t, key, maxSeq, maxOff); err != nil { +// return +// } - t := db.binBatch - t.Lock() - defer t.Unlock() +// err = t.Commit() +// } - n, err := db.rmExpire(t, BitType, key) - if err != nil { - return 0, err - } +// return +// } - err = t.Commit() - return n, err -} +// func (db *DB) BGetBit(key []byte, offset int32) (uint8, error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") -func (db *DB) BScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - return db.scan(BitMetaType, key, count, inclusive, match) -} +// if seq, off, err := db.bParseOffset(key, offset); err != nil { +// return 0, err +// } else { +// _, segment, err := db.bGetSegment(key, seq) +// if err != nil { +// return 0, err +// } -func (db *DB) BRevScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - return db.revscan(BitMetaType, key, count, inclusive, match) -} +// if segment == nil { +// return 0, nil +// } else { +// return getBit(segment, off), nil +// } +// } +// } -func (db *DB) bFlush() (drop int64, err error) { - t := db.binBatch - t.Lock() - defer t.Unlock() +// // func (db *DB) BGetRange(key []byte, 
start int32, end int32) ([]byte, error) { +// // section := make([]byte) - return db.flushType(t, BitType) -} +// // return +// // } + +// func (db *DB) BCount(key []byte, start int32, end int32) (cnt int32, err error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") + +// var sseq, soff uint32 +// if sseq, soff, err = db.bParseOffset(key, start); err != nil { +// return +// } + +// var eseq, eoff uint32 +// if eseq, eoff, err = db.bParseOffset(key, end); err != nil { +// return +// } + +// if sseq > eseq || (sseq == eseq && soff > eoff) { +// sseq, eseq = eseq, sseq +// soff, eoff = eoff, soff +// } + +// var segCnt int32 +// if eseq == sseq { +// if segCnt, err = db.bCountSeg(key, sseq, soff, eoff); err != nil { +// return 0, err +// } + +// cnt = segCnt + +// } else { +// if segCnt, err = db.bCountSeg(key, sseq, soff, segBitSize-1); err != nil { +// return 0, err +// } else { +// cnt += segCnt +// } + +// if segCnt, err = db.bCountSeg(key, eseq, 0, eoff); err != nil { +// return 0, err +// } else { +// cnt += segCnt +// } +// } + +// // middle segs +// var segment []byte +// skey := db.bEncodeBinKey(key, sseq) +// ekey := db.bEncodeBinKey(key, eseq) + +// it := db.bucket.RangeIterator(skey, ekey, store.RangeOpen) +// for ; it.Valid(); it.Next() { +// segment = it.RawValue() +// for _, bt := range segment { +// cnt += bitsInByte[bt] +// } +// } +// it.Close() + +// return +// } + +// func (db *DB) BTail(key []byte) (int32, error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") + +// // effective length of data, the highest bit-pos set in history +// tailSeq, tailOff, err := db.bGetMeta(key) +// if err != nil { +// return 0, err +// } + +// tail := int32(-1) +// if tailSeq >= 0 { +// tail = int32(uint32(tailSeq)< maxDstSeq || (seq == maxDstSeq && off > maxDstOff) { +// maxDstSeq = seq +// maxDstOff = off +// } +// } + +// if (op == OPnot && validKeyNum != 1) || +// (op != 
OPnot && validKeyNum < 2) { +// return // with not enough existing source key +// } + +// var srcIdx int +// for srcIdx = 0; srcIdx < keyNum; srcIdx++ { +// if srckeys[srcIdx] != nil { +// break +// } +// } + +// // init - data +// var segments = make([][]byte, maxDstSeq+1) + +// if op == OPnot { +// // ps : +// // ( ~num == num ^ 0x11111111 ) +// // we init the result segments with all bit set, +// // then we can calculate through the way of 'xor'. + +// // ahead segments bin format : 1111 ... 1111 +// for i := uint32(0); i < maxDstSeq; i++ { +// segments[i] = fillSegment +// } + +// // last segment bin format : 1111..1100..0000 +// var tailSeg = make([]byte, segByteSize, segByteSize) +// var fillByte = fillBits[7] +// var tailSegLen = db.bCapByteSize(uint32(0), maxDstOff) +// for i := uint32(0); i < tailSegLen-1; i++ { +// tailSeg[i] = fillByte +// } +// tailSeg[tailSegLen-1] = fillBits[maxDstOff-(tailSegLen-1)<<3] +// segments[maxDstSeq] = tailSeg + +// } else { +// // ps : init segments by data corresponding to the 1st valid source key +// it := db.bIterator(srckeys[srcIdx]) +// for ; it.Valid(); it.Next() { +// if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { +// // to do ... +// it.Close() +// return +// } +// segments[seq] = it.Value() +// } +// it.Close() +// srcIdx++ +// } + +// // operation with following keys +// var res []byte +// for i := srcIdx; i < keyNum; i++ { +// if srckeys[i] == nil { +// continue +// } + +// it := db.bIterator(srckeys[i]) +// for idx, end := uint32(0), false; !end; it.Next() { +// end = !it.Valid() +// if !end { +// if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { +// // to do ... 
+// it.Close() +// return +// } +// } else { +// seq = maxDstSeq + 1 +// } + +// // todo : +// // operation 'and' can be optimize here : +// // if seq > max_segments_idx, this loop can be break, +// // which can avoid cost from Key() and bDecodeBinKey() + +// for ; idx < seq; idx++ { +// res = nil +// exeOp(segments[idx], nil, &res) +// segments[idx] = res +// } + +// if !end { +// res = it.Value() +// exeOp(segments[seq], res, &res) +// segments[seq] = res +// idx++ +// } +// } +// it.Close() +// } + +// // clear the old data in case +// db.bDelete(t, dstkey) +// db.rmExpire(t, BitType, dstkey) + +// // set data +// db.bSetMeta(t, dstkey, maxDstSeq, maxDstOff) + +// var bk []byte +// for seq, segt := range segments { +// if segt != nil { +// bk = db.bEncodeBinKey(dstkey, uint32(seq)) +// t.Put(bk, segt) +// } +// } + +// err = t.Commit() +// if err == nil { +// // blen = int32(db.bCapByteSize(maxDstOff, maxDstOff)) +// blen = int32(maxDstSeq<= int(MaxDBNumber) { + if index < 0 || index >= int(tx.l.cfg.Databases) { return fmt.Errorf("invalid db index %d", index) } diff --git a/server/app.go b/server/app.go index 08cd5d0..12ef2d7 100644 --- a/server/app.go +++ b/server/app.go @@ -1,7 +1,7 @@ package server import ( - goledis "github.com/siddontang/ledisdb/client/go/ledis" + goledis "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/ledis" "net" @@ -157,7 +157,9 @@ func (app *App) Close() { app.closeScript() + app.m.Lock() app.m.Close() + app.m.Unlock() app.snap.Close() diff --git a/server/app_test.go b/server/app_test.go index 781d6fb..3a23538 100644 --- a/server/app_test.go +++ b/server/app_test.go @@ -1,7 +1,7 @@ package server import ( - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/config" "os" "sync" @@ -22,7 +22,8 @@ func newTestLedisClient() { func getTestConn() *ledis.Conn { startTestApp() - return 
testLedisClient.Get() + conn, _ := testLedisClient.Get() + return conn } func startTestApp() { diff --git a/server/cmd_bit.go b/server/cmd_bit.go index ac577ea..cfe0a45 100644 --- a/server/cmd_bit.go +++ b/server/cmd_bit.go @@ -1,301 +1,289 @@ package server -import ( - "github.com/siddontang/go/hack" +// import ( +// "github.com/siddontang/go/hack" - "github.com/siddontang/ledisdb/ledis" - "strings" -) +// "github.com/siddontang/ledisdb/ledis" +// "strings" +// ) -func bgetCommand(c *client) error { - args := c.args - if len(args) != 1 { - return ErrCmdParams - } +// func bgetCommand(c *client) error { +// args := c.args +// if len(args) != 1 { +// return ErrCmdParams +// } - if v, err := c.db.BGet(args[0]); err != nil { - return err - } else { - c.resp.writeBulk(v) - } - return nil -} +// if v, err := c.db.BGet(args[0]); err != nil { +// return err +// } else { +// c.resp.writeBulk(v) +// } +// return nil +// } -func bdeleteCommand(c *client) error { - args := c.args - if len(args) != 1 { - return ErrCmdParams - } +// func bdeleteCommand(c *client) error { +// args := c.args +// if len(args) != 1 { +// return ErrCmdParams +// } - if n, err := c.db.BDelete(args[0]); err != nil { - return err - } else { - c.resp.writeInteger(n) - } - return nil -} +// if n, err := c.db.BDelete(args[0]); err != nil { +// return err +// } else { +// c.resp.writeInteger(n) +// } +// return nil +// } -func bsetbitCommand(c *client) error { - args := c.args - if len(args) != 3 { - return ErrCmdParams - } +// func bsetbitCommand(c *client) error { +// args := c.args +// if len(args) != 3 { +// return ErrCmdParams +// } - var err error - var offset int32 - var val int8 +// var err error +// var offset int32 +// var val int8 - offset, err = ledis.StrInt32(args[1], nil) +// offset, err = ledis.StrInt32(args[1], nil) - if err != nil { - return ErrOffset - } +// if err != nil { +// return ErrOffset +// } - val, err = ledis.StrInt8(args[2], nil) - if val != 0 && val != 1 { - return ErrBool - } 
+// val, err = ledis.StrInt8(args[2], nil) +// if val != 0 && val != 1 { +// return ErrBool +// } - if err != nil { - return ErrBool - } +// if err != nil { +// return ErrBool +// } - if ori, err := c.db.BSetBit(args[0], offset, uint8(val)); err != nil { - return err - } else { - c.resp.writeInteger(int64(ori)) - } - return nil -} +// if ori, err := c.db.BSetBit(args[0], offset, uint8(val)); err != nil { +// return err +// } else { +// c.resp.writeInteger(int64(ori)) +// } +// return nil +// } -func bgetbitCommand(c *client) error { - args := c.args - if len(args) != 2 { - return ErrCmdParams - } +// func bgetbitCommand(c *client) error { +// args := c.args +// if len(args) != 2 { +// return ErrCmdParams +// } - offset, err := ledis.StrInt32(args[1], nil) +// offset, err := ledis.StrInt32(args[1], nil) - if err != nil { - return ErrOffset - } +// if err != nil { +// return ErrOffset +// } - if v, err := c.db.BGetBit(args[0], offset); err != nil { - return err - } else { - c.resp.writeInteger(int64(v)) - } - return nil -} +// if v, err := c.db.BGetBit(args[0], offset); err != nil { +// return err +// } else { +// c.resp.writeInteger(int64(v)) +// } +// return nil +// } -func bmsetbitCommand(c *client) error { - args := c.args - if len(args) < 3 { - return ErrCmdParams - } +// func bmsetbitCommand(c *client) error { +// args := c.args +// if len(args) < 3 { +// return ErrCmdParams +// } - key := args[0] - if len(args[1:])&1 != 0 { - return ErrCmdParams - } else { - args = args[1:] - } +// key := args[0] +// if len(args[1:])&1 != 0 { +// return ErrCmdParams +// } else { +// args = args[1:] +// } - var err error - var offset int32 - var val int8 +// var err error +// var offset int32 +// var val int8 - pairs := make([]ledis.BitPair, len(args)>>1) - for i := 0; i < len(pairs); i++ { - offset, err = ledis.StrInt32(args[i<<1], nil) +// pairs := make([]ledis.BitPair, len(args)>>1) +// for i := 0; i < len(pairs); i++ { +// offset, err = ledis.StrInt32(args[i<<1], nil) - if 
err != nil { - return ErrOffset - } +// if err != nil { +// return ErrOffset +// } - val, err = ledis.StrInt8(args[i<<1+1], nil) - if val != 0 && val != 1 { - return ErrBool - } +// val, err = ledis.StrInt8(args[i<<1+1], nil) +// if val != 0 && val != 1 { +// return ErrBool +// } - if err != nil { - return ErrBool - } +// if err != nil { +// return ErrBool +// } - pairs[i].Pos = offset - pairs[i].Val = uint8(val) - } +// pairs[i].Pos = offset +// pairs[i].Val = uint8(val) +// } - if place, err := c.db.BMSetBit(key, pairs...); err != nil { - return err - } else { - c.resp.writeInteger(place) - } - return nil -} +// if place, err := c.db.BMSetBit(key, pairs...); err != nil { +// return err +// } else { +// c.resp.writeInteger(place) +// } +// return nil +// } -func bcountCommand(c *client) error { - args := c.args - argCnt := len(args) +// func bcountCommand(c *client) error { +// args := c.args +// argCnt := len(args) - if !(argCnt > 0 && argCnt <= 3) { - return ErrCmdParams - } +// if !(argCnt > 0 && argCnt <= 3) { +// return ErrCmdParams +// } - // BCount(key []byte, start int32, end int32) (cnt int32, err error) { +// // BCount(key []byte, start int32, end int32) (cnt int32, err error) { - var err error - var start, end int32 = 0, -1 +// var err error +// var start, end int32 = 0, -1 - if argCnt > 1 { - start, err = ledis.StrInt32(args[1], nil) - if err != nil { - return ErrValue - } - } +// if argCnt > 1 { +// start, err = ledis.StrInt32(args[1], nil) +// if err != nil { +// return ErrValue +// } +// } - if argCnt > 2 { - end, err = ledis.StrInt32(args[2], nil) - if err != nil { - return ErrValue - } - } +// if argCnt > 2 { +// end, err = ledis.StrInt32(args[2], nil) +// if err != nil { +// return ErrValue +// } +// } - if cnt, err := c.db.BCount(args[0], start, end); err != nil { - return err - } else { - c.resp.writeInteger(int64(cnt)) - } - return nil -} +// if cnt, err := c.db.BCount(args[0], start, end); err != nil { +// return err +// } else { +// 
c.resp.writeInteger(int64(cnt)) +// } +// return nil +// } -func boptCommand(c *client) error { - args := c.args - if len(args) < 2 { - return ErrCmdParams - } +// func boptCommand(c *client) error { +// args := c.args +// if len(args) < 2 { +// return ErrCmdParams +// } - opDesc := strings.ToLower(hack.String(args[0])) - dstKey := args[1] - srcKeys := args[2:] +// opDesc := strings.ToLower(hack.String(args[0])) +// dstKey := args[1] +// srcKeys := args[2:] - var op uint8 - switch opDesc { - case "and": - op = ledis.OPand - case "or": - op = ledis.OPor - case "xor": - op = ledis.OPxor - case "not": - op = ledis.OPnot - default: - return ErrCmdParams - } +// var op uint8 +// switch opDesc { +// case "and": +// op = ledis.OPand +// case "or": +// op = ledis.OPor +// case "xor": +// op = ledis.OPxor +// case "not": +// op = ledis.OPnot +// default: +// return ErrCmdParams +// } - if len(srcKeys) == 0 { - return ErrCmdParams - } - if blen, err := c.db.BOperation(op, dstKey, srcKeys...); err != nil { - return err - } else { - c.resp.writeInteger(int64(blen)) - } - return nil -} +// if len(srcKeys) == 0 { +// return ErrCmdParams +// } +// if blen, err := c.db.BOperation(op, dstKey, srcKeys...); err != nil { +// return err +// } else { +// c.resp.writeInteger(int64(blen)) +// } +// return nil +// } -func bexpireCommand(c *client) error { - args := c.args - if len(args) != 2 { - return ErrCmdParams - } +// func bexpireCommand(c *client) error { +// args := c.args +// if len(args) != 2 { +// return ErrCmdParams +// } - duration, err := ledis.StrInt64(args[1], nil) - if err != nil { - return ErrValue - } +// duration, err := ledis.StrInt64(args[1], nil) +// if err != nil { +// return ErrValue +// } - if v, err := c.db.BExpire(args[0], duration); err != nil { - return err - } else { - c.resp.writeInteger(v) - } +// if v, err := c.db.BExpire(args[0], duration); err != nil { +// return err +// } else { +// c.resp.writeInteger(v) +// } - return nil -} +// return nil +// } -func 
bexpireAtCommand(c *client) error { - args := c.args - if len(args) != 2 { - return ErrCmdParams - } +// func bexpireAtCommand(c *client) error { +// args := c.args +// if len(args) != 2 { +// return ErrCmdParams +// } - when, err := ledis.StrInt64(args[1], nil) - if err != nil { - return ErrValue - } +// when, err := ledis.StrInt64(args[1], nil) +// if err != nil { +// return ErrValue +// } - if v, err := c.db.BExpireAt(args[0], when); err != nil { - return err - } else { - c.resp.writeInteger(v) - } +// if v, err := c.db.BExpireAt(args[0], when); err != nil { +// return err +// } else { +// c.resp.writeInteger(v) +// } - return nil -} +// return nil +// } -func bttlCommand(c *client) error { - args := c.args - if len(args) != 1 { - return ErrCmdParams - } +// func bttlCommand(c *client) error { +// args := c.args +// if len(args) != 1 { +// return ErrCmdParams +// } - if v, err := c.db.BTTL(args[0]); err != nil { - return err - } else { - c.resp.writeInteger(v) - } +// if v, err := c.db.BTTL(args[0]); err != nil { +// return err +// } else { +// c.resp.writeInteger(v) +// } - return nil -} +// return nil +// } -func bpersistCommand(c *client) error { - args := c.args - if len(args) != 1 { - return ErrCmdParams - } +// func bpersistCommand(c *client) error { +// args := c.args +// if len(args) != 1 { +// return ErrCmdParams +// } - if n, err := c.db.BPersist(args[0]); err != nil { - return err - } else { - c.resp.writeInteger(n) - } +// if n, err := c.db.BPersist(args[0]); err != nil { +// return err +// } else { +// c.resp.writeInteger(n) +// } - return nil -} +// return nil +// } -func bxscanCommand(c *client) error { - return xscanGeneric(c, c.db.BScan) -} - -func bxrevscanCommand(c *client) error { - return xscanGeneric(c, c.db.BRevScan) -} - -func init() { - register("bget", bgetCommand) - register("bdelete", bdeleteCommand) - register("bsetbit", bsetbitCommand) - register("bgetbit", bgetbitCommand) - register("bmsetbit", bmsetbitCommand) - register("bcount", 
bcountCommand) - register("bopt", boptCommand) - register("bexpire", bexpireCommand) - register("bexpireat", bexpireAtCommand) - register("bttl", bttlCommand) - register("bpersist", bpersistCommand) - register("bxscan", bxscanCommand) - register("bxrevscan", bxrevscanCommand) - register("xbscan", bxscanCommand) - register("xbrevscan", bxrevscanCommand) -} +// func init() { +// register("bget", bgetCommand) +// register("bdelete", bdeleteCommand) +// register("bsetbit", bsetbitCommand) +// register("bgetbit", bgetbitCommand) +// register("bmsetbit", bmsetbitCommand) +// register("bcount", bcountCommand) +// register("bopt", boptCommand) +// register("bexpire", bexpireCommand) +// register("bexpireat", bexpireAtCommand) +// register("bttl", bttlCommand) +// register("bpersist", bpersistCommand) +// } diff --git a/server/cmd_bit_test.go b/server/cmd_bit_test.go index a8f84fb..f6344c4 100644 --- a/server/cmd_bit_test.go +++ b/server/cmd_bit_test.go @@ -1,7 +1,7 @@ package server // import ( -// "github.com/siddontang/ledisdb/client/go/ledis" +// "github.com/siddontang/ledisdb/client/goledis" // "testing" // ) diff --git a/server/cmd_hash.go b/server/cmd_hash.go index 8c277ee..77b1f40 100644 --- a/server/cmd_hash.go +++ b/server/cmd_hash.go @@ -292,20 +292,12 @@ func hpersistCommand(c *client) error { return nil } -func hxscanCommand(c *client) error { - return xscanGeneric(c, c.db.HScan) -} - -func hxrevscanCommand(c *client) error { - return xscanGeneric(c, c.db.HRevScan) -} - -func xhexistsCommand(c *client) error { +func hkeyexistsCommand(c *client) error { args := c.args if len(args) != 1 { return ErrCmdParams } - if n, err := c.db.XHExists(args[0]); err != nil { + if n, err := c.db.HKeyExists(args[0]); err != nil { return err } else { c.resp.writeInteger(n) @@ -334,9 +326,5 @@ func init() { register("hexpireat", hexpireAtCommand) register("httl", httlCommand) register("hpersist", hpersistCommand) - register("hxscan", hxscanCommand) - register("hxrevscan", 
hxrevscanCommand) - register("xhscan", hxscanCommand) - register("xhrevscan", hxrevscanCommand) - register("xhexists", xhexistsCommand) + register("hkeyexists", hkeyexistsCommand) } diff --git a/server/cmd_hash_test.go b/server/cmd_hash_test.go index 7ff5f40..c9bbd77 100644 --- a/server/cmd_hash_test.go +++ b/server/cmd_hash_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "strconv" "testing" ) @@ -12,7 +12,7 @@ func TestHash(t *testing.T) { defer c.Close() key := []byte("a") - if n, err := ledis.Int(c.Do("xhexists", key)); err != nil { + if n, err := ledis.Int(c.Do("hkeyexists", key)); err != nil { t.Fatal(err) } else if n != 0 { t.Fatal(n) @@ -23,7 +23,7 @@ func TestHash(t *testing.T) { } else if n != 1 { t.Fatal(n) } - if n, err := ledis.Int(c.Do("xhexists", key)); err != nil { + if n, err := ledis.Int(c.Do("hkeyexists", key)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) diff --git a/server/cmd_kv.go b/server/cmd_kv.go index 45a4b7a..25d7bb9 100644 --- a/server/cmd_kv.go +++ b/server/cmd_kv.go @@ -1,10 +1,8 @@ package server import ( - "github.com/siddontang/go/hack" "github.com/siddontang/ledisdb/ledis" "strconv" - "strings" ) // func getCommand(c *client) error { @@ -315,82 +313,6 @@ func persistCommand(c *client) error { return nil } -func parseScanArgs(c *client) (key []byte, match string, count int, err error) { - args := c.args - count = 10 - - switch len(args) { - case 0: - key = nil - return - case 1, 3, 5: - key = args[0] - break - default: - err = ErrCmdParams - return - } - - if len(args) == 3 { - switch strings.ToLower(hack.String(args[1])) { - case "match": - match = hack.String(args[2]) - case "count": - count, err = strconv.Atoi(hack.String(args[2])) - default: - err = ErrCmdParams - return - } - } else if len(args) == 5 { - if strings.ToLower(hack.String(args[1])) != "match" { - err = ErrCmdParams - return - } else if 
strings.ToLower(hack.String(args[3])) != "count" { - err = ErrCmdParams - return - } - - match = hack.String(args[2]) - count, err = strconv.Atoi(hack.String(args[4])) - } - - if count <= 0 { - err = ErrCmdParams - } - - return -} - -func xscanGeneric(c *client, - f func(key []byte, count int, inclusive bool, match string) ([][]byte, error)) error { - key, match, count, err := parseScanArgs(c) - if err != nil { - return err - } - - if ay, err := f(key, count, false, match); err != nil { - return err - } else { - data := make([]interface{}, 2) - if len(ay) < count { - data[0] = []byte("") - } else { - data[0] = ay[len(ay)-1] - } - data[1] = ay - c.resp.writeArray(data) - } - return nil -} - -func xscanCommand(c *client) error { - return xscanGeneric(c, c.db.Scan) -} - -func xrevscanCommand(c *client) error { - return xscanGeneric(c, c.db.RevScan) -} - func appendCommand(c *client) error { args := c.args if len(args) != 2 { @@ -619,6 +541,4 @@ func init() { register("expireat", expireAtCommand) register("ttl", ttlCommand) register("persist", persistCommand) - register("xscan", xscanCommand) - register("xrevscan", xrevscanCommand) } diff --git a/server/cmd_kv_test.go b/server/cmd_kv_test.go index e02066f..b4497f9 100644 --- a/server/cmd_kv_test.go +++ b/server/cmd_kv_test.go @@ -1,7 +1,7 @@ package server import ( - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "testing" ) diff --git a/server/cmd_list.go b/server/cmd_list.go index 7000a74..3d96a4c 100644 --- a/server/cmd_list.go +++ b/server/cmd_list.go @@ -231,14 +231,6 @@ func lpersistCommand(c *client) error { return nil } -func lxscanCommand(c *client) error { - return xscanGeneric(c, c.db.LScan) -} - -func lxrevscanCommand(c *client) error { - return xscanGeneric(c, c.db.LRevScan) -} - func blpopCommand(c *client) error { keys, timeout, err := lParseBPopArgs(c) if err != nil { @@ -285,12 +277,13 @@ func lParseBPopArgs(c *client) (keys [][]byte, timeout 
time.Duration, err error) keys = args[0 : len(args)-1] return } -func xlexistsCommand(c *client) error { + +func lkeyexistsCommand(c *client) error { args := c.args if len(args) != 1 { return ErrCmdParams } - if n, err := c.db.XLExists(args[0]); err != nil { + if n, err := c.db.LKeyExists(args[0]); err != nil { return err } else { c.resp.writeInteger(n) @@ -317,9 +310,5 @@ func init() { register("lexpireat", lexpireAtCommand) register("lttl", lttlCommand) register("lpersist", lpersistCommand) - register("lxscan", lxscanCommand) - register("lxrevscan", lxrevscanCommand) - register("xlscan", lxscanCommand) - register("xlrevscan", lxrevscanCommand) - register("xlexists", xlexistsCommand) + register("lkeyexists", lkeyexistsCommand) } diff --git a/server/cmd_list_test.go b/server/cmd_list_test.go index 526862b..5a237fc 100644 --- a/server/cmd_list_test.go +++ b/server/cmd_list_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "strconv" "testing" ) @@ -58,7 +58,7 @@ func TestList(t *testing.T) { defer c.Close() key := []byte("a") - if n, err := ledis.Int(c.Do("xlexists", key)); err != nil { + if n, err := ledis.Int(c.Do("lkeyexists", key)); err != nil { t.Fatal(err) } else if n != 0 { t.Fatal(n) @@ -70,7 +70,7 @@ func TestList(t *testing.T) { t.Fatal(n) } - if n, err := ledis.Int(c.Do("xlexists", key)); err != nil { + if n, err := ledis.Int(c.Do("lkeyexists", key)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(1) diff --git a/server/cmd_migrate.go b/server/cmd_migrate.go index 3153821..ffe47c7 100644 --- a/server/cmd_migrate.go +++ b/server/cmd_migrate.go @@ -2,7 +2,7 @@ package server import ( "fmt" - goledis "github.com/siddontang/ledisdb/client/go/ledis" + goledis "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/ledis" "strings" "time" @@ -165,15 +165,15 @@ func xttl(db *ledis.DB, tp string, key []byte) (int64, error) { func 
xscan(db *ledis.DB, tp string, count int) ([][]byte, error) { switch strings.ToUpper(tp) { case "KV": - return db.Scan(nil, count, false, "") + return db.Scan(KV, nil, count, false, "") case "HASH": - return db.HScan(nil, count, false, "") + return db.Scan(HASH, nil, count, false, "") case "LIST": - return db.LScan(nil, count, false, "") + return db.Scan(LIST, nil, count, false, "") case "SET": - return db.SScan(nil, count, false, "") + return db.Scan(SET, nil, count, false, "") case "ZSET": - return db.ZScan(nil, count, false, "") + return db.Scan(ZSET, nil, count, false, "") default: return nil, fmt.Errorf("invalid key type %s", tp) } @@ -239,8 +239,8 @@ func xmigratedbCommand(c *client) error { db, err := ledis.StrUint64(args[4], nil) if err != nil { return err - } else if db >= uint64(ledis.MaxDBNumber) { - return fmt.Errorf("invalid db index %d, must < %d", db, ledis.MaxDBNumber) + } else if db >= uint64(c.app.cfg.Databases) { + return fmt.Errorf("invalid db index %d, must < %d", db, c.app.cfg.Databases) } timeout, err := ledis.StrInt64(args[5], nil) @@ -266,11 +266,13 @@ func xmigratedbCommand(c *client) error { mc := c.app.getMigrateClient(addr) - conn := mc.Get() + conn, err := mc.Get() + if err != nil { + return err + } //timeout is milliseconds t := time.Duration(timeout) * time.Millisecond - conn.SetConnectTimeout(t) if _, err = conn.Do("select", db); err != nil { return err @@ -326,8 +328,8 @@ func xmigrateCommand(c *client) error { db, err := ledis.StrUint64(args[4], nil) if err != nil { return err - } else if db >= uint64(ledis.MaxDBNumber) { - return fmt.Errorf("invalid db index %d, must < %d", db, ledis.MaxDBNumber) + } else if db >= uint64(c.app.cfg.Databases) { + return fmt.Errorf("invalid db index %d, must < %d", db, c.app.cfg.Databases) } timeout, err := ledis.StrInt64(args[5], nil) @@ -358,11 +360,13 @@ func xmigrateCommand(c *client) error { mc := c.app.getMigrateClient(addr) - conn := mc.Get() + conn, err := mc.Get() + if err != nil { + 
return err + } //timeout is milliseconds t := time.Duration(timeout) * time.Millisecond - conn.SetConnectTimeout(t) if _, err = conn.Do("select", db); err != nil { return err diff --git a/server/cmd_migrate_test.go b/server/cmd_migrate_test.go index 1dfb2c0..0bfc6cb 100644 --- a/server/cmd_migrate_test.go +++ b/server/cmd_migrate_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/config" "os" "testing" @@ -80,10 +80,10 @@ func TestMigrate(t *testing.T) { time.Sleep(1 * time.Second) - c1 := ledis.NewConn(s1Cfg.Addr) + c1, _ := ledis.Connect(s1Cfg.Addr) defer c1.Close() - c2 := ledis.NewConn(s2Cfg.Addr) + c2, _ := ledis.Connect(s2Cfg.Addr) defer c2.Close() if _, err = c1.Do("set", "a", "1"); err != nil { diff --git a/server/cmd_replication.go b/server/cmd_replication.go index 7aa2e17..390530e 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -131,7 +131,7 @@ func syncCommand(c *client) error { c.syncBuf.Write(dummyBuf) - if _, _, err := c.app.ldb.ReadLogsToTimeout(logId, &c.syncBuf, 30, c.app.quit); err != nil { + if _, _, err := c.app.ldb.ReadLogsToTimeout(logId, &c.syncBuf, 1, c.app.quit); err != nil { return err } else { buf := c.syncBuf.Bytes() @@ -157,6 +157,10 @@ func replconfCommand(c *client) error { return ErrCmdParams } + if !c.app.ldb.ReplicationUsed() { + return ledis.ErrRplNotSupport + } + //now only support "listening-port" for i := 0; i < len(args); i += 2 { switch strings.ToLower(hack.String(args[i])) { @@ -188,9 +192,11 @@ func roleCommand(c *client) error { } c.app.m.Lock() - isMaster := len(c.app.cfg.SlaveOf) == 0 + slaveof := c.app.cfg.SlaveOf c.app.m.Unlock() + isMaster := len(slaveof) == 0 + ay := make([]interface{}, 0, 5) var lastId int64 = 0 @@ -217,7 +223,7 @@ func roleCommand(c *client) error { c.app.slock.Unlock() ay = append(ay, items) } else { - host, port, _ := 
splitHostPort(c.app.cfg.Addr) + host, port, _ := splitHostPort(slaveof) ay = append(ay, []byte("slave")) ay = append(ay, []byte(host)) ay = append(ay, int64(port)) diff --git a/server/cmd_replication_test.go b/server/cmd_replication_test.go index 6be89a8..f4c13fc 100644 --- a/server/cmd_replication_test.go +++ b/server/cmd_replication_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - goledis "github.com/siddontang/ledisdb/client/go/ledis" + goledis "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/config" "os" "reflect" @@ -14,8 +14,8 @@ func checkDataEqual(master *App, slave *App) error { mdb, _ := master.ldb.Select(0) sdb, _ := slave.ldb.Select(0) - mkeys, _ := mdb.Scan(nil, 100, true, "") - skeys, _ := sdb.Scan(nil, 100, true, "") + mkeys, _ := mdb.Scan(KV, nil, 100, true, "") + skeys, _ := sdb.Scan(KV, nil, 100, true, "") if len(mkeys) != len(skeys) { return fmt.Errorf("keys number not equal %d != %d", len(mkeys), len(skeys)) @@ -140,7 +140,7 @@ func TestReplication(t *testing.T) { if err = checkTestRole(slaveCfg.Addr, []interface{}{ []byte("slave"), []byte("127.0.0.1"), - int64(11183), + int64(11182), []byte("connected"), int64(sStat.LastID), }); err != nil { @@ -159,7 +159,7 @@ func TestReplication(t *testing.T) { } func checkTestRole(addr string, checkRoles []interface{}) error { - conn := goledis.NewConn(addr) + conn, _ := goledis.Connect(addr) defer conn.Close() roles, err := goledis.MultiBulk(conn.Do("ROLE")) if err != nil { diff --git a/server/cmd_scan.go b/server/cmd_scan.go new file mode 100644 index 0000000..1e62d8f --- /dev/null +++ b/server/cmd_scan.go @@ -0,0 +1,215 @@ +package server + +import ( + "fmt" + "github.com/siddontang/go/hack" + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/ledis" + "strconv" + "strings" +) + +func parseScanArgs(args [][]byte) (cursor []byte, match string, count int, err error) { + cursor = args[0] + + args = args[1:] + + count = 10 + + for i := 0; i < len(args); { + 
switch strings.ToUpper(hack.String(args[i])) { + case "MATCH": + if i+1 >= len(args) { + err = ErrCmdParams + return + } + + match = hack.String(args[i+1]) + i = i + 2 + case "COUNT": + if i+1 >= len(args) { + err = ErrCmdParams + return + } + + count, err = strconv.Atoi(hack.String(args[i+1])) + if err != nil { + return + } + + i = i + 2 + default: + err = fmt.Errorf("invalid argument %s", args[i]) + return + } + } + + return +} + +// XSCAN type cursor [MATCH match] [COUNT count] +func xscanCommand(c *client) error { + args := c.args + + if len(args) < 2 { + return ErrCmdParams + } + + var dataType ledis.DataType + switch strings.ToUpper(hack.String(args[0])) { + case "KV": + dataType = ledis.KV + case "HASH": + dataType = ledis.HASH + case "LIST": + dataType = ledis.LIST + case "SET": + dataType = ledis.SET + case "ZSET": + dataType = ledis.ZSET + default: + return fmt.Errorf("invalid key type %s", args[0]) + } + + cursor, match, count, err := parseScanArgs(args[1:]) + + if err != nil { + return err + } + + ay, err := c.db.Scan(dataType, cursor, count, false, match) + if err != nil { + return err + } + + data := make([]interface{}, 2) + if len(ay) < count { + data[0] = []byte("") + } else { + data[0] = ay[len(ay)-1] + } + data[1] = ay + c.resp.writeArray(data) + return nil +} + +// XHSCAN key cursor [MATCH match] [COUNT count] +func xhscanCommand(c *client) error { + args := c.args + + if len(args) < 2 { + return ErrCmdParams + } + + key := args[0] + + cursor, match, count, err := parseScanArgs(args[1:]) + + if err != nil { + return err + } + + ay, err := c.db.HScan(key, cursor, count, false, match) + if err != nil { + return err + } + + data := make([]interface{}, 2) + if len(ay) < count { + data[0] = []byte("") + } else { + data[0] = ay[len(ay)-1].Field + } + + vv := make([][]byte, 0, len(ay)*2) + + for _, v := range ay { + vv = append(vv, v.Field, v.Value) + } + + data[1] = vv + + c.resp.writeArray(data) + return nil +} + +// XSSCAN key cursor [MATCH match] 
[COUNT count] +func xsscanCommand(c *client) error { + args := c.args + + if len(args) < 2 { + return ErrCmdParams + } + + key := args[0] + + cursor, match, count, err := parseScanArgs(args[1:]) + + if err != nil { + return err + } + + ay, err := c.db.SScan(key, cursor, count, false, match) + if err != nil { + return err + } + + data := make([]interface{}, 2) + if len(ay) < count { + data[0] = []byte("") + } else { + data[0] = ay[len(ay)-1] + } + + data[1] = ay + + c.resp.writeArray(data) + return nil +} + +// XZSCAN key cursor [MATCH match] [COUNT count] +func xzscanCommand(c *client) error { + args := c.args + + if len(args) < 2 { + return ErrCmdParams + } + + key := args[0] + + cursor, match, count, err := parseScanArgs(args[1:]) + + if err != nil { + return err + } + + ay, err := c.db.ZScan(key, cursor, count, false, match) + if err != nil { + return err + } + + data := make([]interface{}, 2) + if len(ay) < count { + data[0] = []byte("") + } else { + data[0] = ay[len(ay)-1].Member + } + + vv := make([][]byte, 0, len(ay)*2) + + for _, v := range ay { + vv = append(vv, v.Member, num.FormatInt64ToSlice(v.Score)) + } + + data[1] = vv + + c.resp.writeArray(data) + return nil +} + +func init() { + register("xscan", xscanCommand) + register("xhscan", xhscanCommand) + register("xsscan", xsscanCommand) + register("xzscan", xzscanCommand) +} diff --git a/server/scan_test.go b/server/cmd_scan_test.go similarity index 57% rename from server/scan_test.go rename to server/cmd_scan_test.go index 1778416..328ecad 100644 --- a/server/scan_test.go +++ b/server/cmd_scan_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/config" "os" "testing" @@ -29,14 +29,14 @@ func TestScan(t *testing.T) { defer c.Close() testKVScan(t, c) - testHashScan(t, c) - testListScan(t, c) - testZSetScan(t, c) - testSetScan(t, c) + testHashKeyScan(t, c) + testListKeyScan(t, 
c) + testZSetKeyScan(t, c) + testSetKeyScan(t, c) } -func checkScanValues(t *testing.T, ay interface{}, values ...int) { +func checkScanValues(t *testing.T, ay interface{}, values ...interface{}) { a, err := ledis.Strings(ay, nil) if err != nil { t.Fatal(err) @@ -47,14 +47,14 @@ func checkScanValues(t *testing.T, ay interface{}, values ...int) { } for i, v := range a { - if string(v) != fmt.Sprintf("%d", values[i]) { - t.Fatal(fmt.Sprintf("%d %s != %d", string(v), values[i])) + if string(v) != fmt.Sprintf("%v", values[i]) { + t.Fatal(fmt.Sprintf("%d %s != %v", string(v), values[i])) } } } -func checkScan(t *testing.T, c *ledis.Client, cmd string) { - if ay, err := ledis.Values(c.Do(cmd, "", "count", 5)); err != nil { +func checkScan(t *testing.T, c *ledis.Client, tp string) { + if ay, err := ledis.Values(c.Do("XSCAN", tp, "", "count", 5)); err != nil { t.Fatal(err) } else if len(ay) != 2 { t.Fatal(len(ay)) @@ -64,7 +64,7 @@ func checkScan(t *testing.T, c *ledis.Client, cmd string) { checkScanValues(t, ay[1], 0, 1, 2, 3, 4) } - if ay, err := ledis.Values(c.Do(cmd, "4", "count", 6)); err != nil { + if ay, err := ledis.Values(c.Do("XSCAN", tp, "4", "count", 6)); err != nil { t.Fatal(err) } else if len(ay) != 2 { t.Fatal(len(ay)) @@ -76,29 +76,6 @@ func checkScan(t *testing.T, c *ledis.Client, cmd string) { } -func checkRevScan(t *testing.T, c *ledis.Client, cmd string) { - if ay, err := ledis.Values(c.Do(cmd, "", "count", 5)); err != nil { - t.Fatal(err) - } else if len(ay) != 2 { - t.Fatal(len(ay)) - } else if n := ay[0].([]byte); string(n) != "5" { - t.Fatal(string(n)) - } else { - checkScanValues(t, ay[1], 9, 8, 7, 6, 5) - } - - if ay, err := ledis.Values(c.Do(cmd, "5", "count", 6)); err != nil { - t.Fatal(err) - } else if len(ay) != 2 { - t.Fatal(len(ay)) - } else if n := ay[0].([]byte); string(n) != "" { - t.Fatal(string(n)) - } else { - checkScanValues(t, ay[1], 4, 3, 2, 1, 0) - } - -} - func testKVScan(t *testing.T, c *ledis.Client) { for i := 0; i < 10; i++ { 
if _, err := c.Do("set", fmt.Sprintf("%d", i), []byte("value")); err != nil { @@ -106,50 +83,95 @@ func testKVScan(t *testing.T, c *ledis.Client) { } } - checkScan(t, c, "xscan") - checkRevScan(t, c, "xrevscan") + checkScan(t, c, "KV") } -func testHashScan(t *testing.T, c *ledis.Client) { +func testHashKeyScan(t *testing.T, c *ledis.Client) { for i := 0; i < 10; i++ { if _, err := c.Do("hset", fmt.Sprintf("%d", i), fmt.Sprintf("%d", i), []byte("value")); err != nil { t.Fatal(err) } } - checkScan(t, c, "xhscan") - checkRevScan(t, c, "xhrevscan") + checkScan(t, c, "HASH") } -func testListScan(t *testing.T, c *ledis.Client) { +func testListKeyScan(t *testing.T, c *ledis.Client) { for i := 0; i < 10; i++ { if _, err := c.Do("lpush", fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil { t.Fatal(err) } } - checkScan(t, c, "xlscan") - checkRevScan(t, c, "xlrevscan") + checkScan(t, c, "LIST") } -func testZSetScan(t *testing.T, c *ledis.Client) { +func testZSetKeyScan(t *testing.T, c *ledis.Client) { for i := 0; i < 10; i++ { if _, err := c.Do("zadd", fmt.Sprintf("%d", i), i, []byte("value")); err != nil { t.Fatal(err) } } - checkScan(t, c, "zxscan") - checkRevScan(t, c, "zxrevscan") + checkScan(t, c, "ZSET") } -func testSetScan(t *testing.T, c *ledis.Client) { +func testSetKeyScan(t *testing.T, c *ledis.Client) { for i := 0; i < 10; i++ { if _, err := c.Do("sadd", fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil { t.Fatal(err) } } - checkScan(t, c, "xsscan") - checkRevScan(t, c, "xsrevscan") + checkScan(t, c, "SET") +} + +func TestHashScan(t *testing.T) { + c := getTestConn() + defer c.Close() + + key := "scan_hash" + c.Do("HMSET", key, "a", 1, "b", 2) + + if ay, err := ledis.Values(c.Do("XHSCAN", key, "")); err != nil { + t.Fatal(err) + } else if len(ay) != 2 { + t.Fatal(len(ay)) + } else { + checkScanValues(t, ay[1], "a", 1, "b", 2) + } +} + +func TestSetScan(t *testing.T) { + c := getTestConn() + defer c.Close() + + key := "scan_set" + c.Do("SADD", key, "a", 
"b") + + if ay, err := ledis.Values(c.Do("XSSCAN", key, "")); err != nil { + t.Fatal(err) + } else if len(ay) != 2 { + t.Fatal(len(ay)) + } else { + checkScanValues(t, ay[1], "a", "b") + } + +} + +func TestZSetScan(t *testing.T) { + c := getTestConn() + defer c.Close() + + key := "scan_zset" + c.Do("ZADD", key, 1, "a", 2, "b") + + if ay, err := ledis.Values(c.Do("XZSCAN", key, "")); err != nil { + t.Fatal(err) + } else if len(ay) != 2 { + t.Fatal(len(ay)) + } else { + checkScanValues(t, ay[1], "a", 1, "b", 2) + } + } diff --git a/server/cmd_script_test.go b/server/cmd_script_test.go index 017e527..88758fd 100644 --- a/server/cmd_script_test.go +++ b/server/cmd_script_test.go @@ -4,7 +4,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "reflect" "testing" ) diff --git a/server/cmd_server.go b/server/cmd_server.go index d71da77..b402fca 100644 --- a/server/cmd_server.go +++ b/server/cmd_server.go @@ -111,6 +111,23 @@ func timeCommand(c *client) error { return nil } +func configGetCommand(c *client) error { + args := c.args + if len(args) != 2 { + return ErrCmdParams + } + + ay := make([][]byte, 0, 2) + key := hack.String(args[1]) + switch key { + case "databases": + ay = append(ay, []byte("databases"), num.FormatUint8ToSlice(c.app.cfg.Databases)) + } + + c.resp.writeSliceArray(ay) + return nil +} + func configCommand(c *client) error { if len(c.args) < 1 { return ErrCmdParams @@ -124,6 +141,8 @@ func configCommand(c *client) error { c.resp.writeStatus(OK) return nil } + case "get": + return configGetCommand(c) default: return ErrCmdParams } diff --git a/server/cmd_set.go b/server/cmd_set.go index ae22bc5..4b780b7 100644 --- a/server/cmd_set.go +++ b/server/cmd_set.go @@ -262,20 +262,12 @@ func spersistCommand(c *client) error { return nil } -func sxscanCommand(c *client) error { - return xscanGeneric(c, c.db.SScan) -} - -func sxrevscanCommand(c *client) error { - return 
xscanGeneric(c, c.db.SRevScan) -} - -func xsexistsCommand(c *client) error { +func skeyexistsCommand(c *client) error { args := c.args if len(args) != 1 { return ErrCmdParams } - if n, err := c.db.XSExists(args[0]); err != nil { + if n, err := c.db.SKeyExists(args[0]); err != nil { return err } else { c.resp.writeInteger(n) @@ -295,16 +287,13 @@ func init() { register("srem", sremCommand) register("sunion", sunionCommand) register("sunionstore", sunionstoreCommand) + register("sclear", sclearCommand) register("smclear", smclearCommand) register("sexpire", sexpireCommand) register("sexpireat", sexpireAtCommand) register("sttl", sttlCommand) register("spersist", spersistCommand) - register("sxscan", sxscanCommand) - register("sxrevscan", sxrevscanCommand) - register("xsscan", sxscanCommand) - register("xsrevscan", sxrevscanCommand) - register("xsexists", xsexistsCommand) + register("skeyexists", skeyexistsCommand) } diff --git a/server/cmd_set_test.go b/server/cmd_set_test.go index 03c0702..2680c81 100644 --- a/server/cmd_set_test.go +++ b/server/cmd_set_test.go @@ -1,7 +1,7 @@ package server import ( - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "testing" ) @@ -12,7 +12,7 @@ func TestSet(t *testing.T) { key1 := "testdb_cmd_set_1" key2 := "testdb_cmd_set_2" - if n, err := ledis.Int(c.Do("xsexists", key1)); err != nil { + if n, err := ledis.Int(c.Do("skeyexists", key1)); err != nil { t.Fatal(err) } else if n != 0 { t.Fatal(n) @@ -24,7 +24,7 @@ func TestSet(t *testing.T) { t.Fatal(n) } - if n, err := ledis.Int(c.Do("xsexists", key1)); err != nil { + if n, err := ledis.Int(c.Do("skeyexists", key1)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) diff --git a/server/cmd_sort_test.go b/server/cmd_sort_test.go index 9812611..d70dc05 100644 --- a/server/cmd_sort_test.go +++ b/server/cmd_sort_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - goledis "github.com/siddontang/ledisdb/client/go/ledis" + goledis 
"github.com/siddontang/ledisdb/client/goledis" "testing" ) diff --git a/server/cmd_ttl_test.go b/server/cmd_ttl_test.go index 5be1347..0b4d161 100644 --- a/server/cmd_ttl_test.go +++ b/server/cmd_ttl_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "testing" "time" ) diff --git a/server/cmd_zset.go b/server/cmd_zset.go index ddeefbd..9c6fdac 100644 --- a/server/cmd_zset.go +++ b/server/cmd_zset.go @@ -641,14 +641,6 @@ func zinterstoreCommand(c *client) error { return err } -func zxscanCommand(c *client) error { - return xscanGeneric(c, c.db.ZScan) -} - -func zxrevscanCommand(c *client) error { - return xscanGeneric(c, c.db.ZRevScan) -} - func zparseMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, rangeType uint8, err error) { rangeType = store.RangeClose if strings.ToLower(hack.String(minBuf)) == "-" { @@ -771,12 +763,12 @@ func zlexcountCommand(c *client) error { return nil } -func xzexistsCommand(c *client) error { +func zkeyexistsCommand(c *client) error { args := c.args if len(args) != 1 { return ErrCmdParams } - if n, err := c.db.XZExists(args[0]); err != nil { + if n, err := c.db.ZKeyExists(args[0]); err != nil { return err } else { c.resp.writeInteger(n) @@ -815,9 +807,5 @@ func init() { register("zexpireat", zexpireAtCommand) register("zttl", zttlCommand) register("zpersist", zpersistCommand) - register("zxscan", zxscanCommand) - register("zxrevscan", zxrevscanCommand) - register("xzscan", zxscanCommand) - register("xzrevscan", zxrevscanCommand) - register("xzexists", xzexistsCommand) + register("zkeyexists", zkeyexistsCommand) } diff --git a/server/cmd_zset_test.go b/server/cmd_zset_test.go index 7760e7b..fbb9a80 100644 --- a/server/cmd_zset_test.go +++ b/server/cmd_zset_test.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/client/go/ledis" + "github.com/siddontang/ledisdb/client/goledis" "reflect" 
"strconv" "testing" @@ -14,7 +14,7 @@ func TestZSet(t *testing.T) { key := []byte("myzset") - if n, err := ledis.Int(c.Do("xzexists", key)); err != nil { + if n, err := ledis.Int(c.Do("zkeyexists", key)); err != nil { t.Fatal(err) } else if n != 0 { t.Fatal(n) @@ -26,7 +26,7 @@ func TestZSet(t *testing.T) { t.Fatal(n) } - if n, err := ledis.Int(c.Do("xzexists", key)); err != nil { + if n, err := ledis.Int(c.Do("zkeyexists", key)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) diff --git a/server/const.go b/server/const.go index dc55e24..9804ad7 100644 --- a/server/const.go +++ b/server/const.go @@ -2,6 +2,7 @@ package server import ( "errors" + "github.com/siddontang/ledisdb/ledis" ) var ( @@ -26,12 +27,11 @@ var ( ) const ( - KV = iota - LIST - HASH - SET - ZSET - BIT + KV ledis.DataType = ledis.KV + LIST = ledis.LIST + HASH = ledis.HASH + SET = ledis.SET + ZSET = ledis.ZSET ) const ( diff --git a/server/info.go b/server/info.go index b06b084..ef4d089 100644 --- a/server/info.go +++ b/server/info.go @@ -205,6 +205,18 @@ func (i *info) dumpReplication(buf *bytes.Buffer) { } i.app.slock.Unlock() + i.app.m.Lock() + slaveof := i.app.cfg.SlaveOf + i.app.m.Unlock() + + isSlave := len(slaveof) > 0 + + if isSlave { + p = append(p, infoPair{"role", "slave"}) + } else { + p = append(p, infoPair{"role", "master"}) + } + num := i.Replication.PubLogNum.Get() p = append(p, infoPair{"pub_log_num", num}) @@ -216,13 +228,14 @@ func (i *info) dumpReplication(buf *bytes.Buffer) { p = append(p, infoPair{"pub_log_ack_per_time", 0}) } - p = append(p, infoPair{"slaveof", i.app.cfg.SlaveOf}) + p = append(p, infoPair{"slaveof", slaveof}) if len(slaves) > 0 { p = append(p, infoPair{"slaves", strings.Join(slaves, ",")}) } - if s, _ := i.app.ldb.ReplicationStat(); s != nil { + s, _ := i.app.ldb.ReplicationStat() + if s != nil { p = append(p, infoPair{"last_log_id", s.LastID}) p = append(p, infoPair{"first_log_id", s.FirstID}) p = append(p, infoPair{"commit_log_id", s.CommitID}) @@ 
-234,6 +247,28 @@ func (i *info) dumpReplication(buf *bytes.Buffer) { p = append(p, infoPair{"master_last_log_id", i.Replication.MasterLastLogID.Get()}) + if isSlave { + // add some redis slave replication info for outer failover service :-) + state := i.app.m.state.Get() + if state == replSyncState || state == replConnectedState { + p = append(p, infoPair{"master_link_status", "up"}) + } else { + p = append(p, infoPair{"master_link_status", "down"}) + } + + // here, all the slaves have same priority now + p = append(p, infoPair{"slave_priority", 100}) + if s != nil { + if s.LastID > 0 { + p = append(p, infoPair{"slave_repl_offset", s.LastID}) + } else { + p = append(p, infoPair{"slave_repl_offset", s.CommitID}) + } + } else { + p = append(p, infoPair{"slave_repl_offset", 0}) + } + } + i.dumpPairs(buf, p...) } diff --git a/server/replication.go b/server/replication.go index 2633ee5..8663203 100644 --- a/server/replication.go +++ b/server/replication.go @@ -7,7 +7,7 @@ import ( "github.com/siddontang/go/log" "github.com/siddontang/go/num" "github.com/siddontang/go/sync2" - goledis "github.com/siddontang/ledisdb/client/go/ledis" + goledis "github.com/siddontang/ledisdb/client/goledis" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/rpl" "net" @@ -34,10 +34,22 @@ const ( replConnectedState ) +type syncBuffer struct { + m *master + bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.m.state.Set(replSyncState) + n, err := b.Buffer.Write(data) + return n, err +} + type master struct { sync.Mutex - conn *goledis.Conn + connLock sync.Mutex + conn *goledis.Conn app *App @@ -47,7 +59,7 @@ type master struct { wg sync.WaitGroup - syncBuf bytes.Buffer + syncBuf syncBuffer state sync2.AtomicInt32 } @@ -57,6 +69,7 @@ func newMaster(app *App) *master { m.app = app m.quit = make(chan struct{}, 1) + m.syncBuf = syncBuffer{m: m} m.state.Set(replConnectState) @@ -64,41 +77,45 @@ func newMaster(app *App) *master { } func (m *master) 
Close() { - m.quit <- struct{}{} + m.state.Set(replConnectState) + + if !m.isQuited() { + close(m.quit) + } m.closeConn() m.wg.Wait() - - select { - case <-m.quit: - default: - } - - m.state.Set(replConnectState) -} - -func (m *master) resetConn() error { - if len(m.addr) == 0 { - return fmt.Errorf("no assign master addr") - } - - if m.conn != nil { - m.conn.Close() - } - - m.conn = goledis.NewConn(m.addr) - - return nil } func (m *master) closeConn() { + m.connLock.Lock() + defer m.connLock.Unlock() + if m.conn != nil { //for replication, we send quit command to close gracefully - m.conn.Send("quit") + m.conn.SetReadDeadline(time.Now().Add(1 * time.Second)) m.conn.Close() } + + m.conn = nil +} + +func (m *master) checkConn() error { + m.connLock.Lock() + defer m.connLock.Unlock() + + var err error + if m.conn == nil { + m.conn, err = goledis.Connect(m.addr) + } else { + if _, err = m.conn.Do("PING"); err != nil { + m.conn.Close() + m.conn = nil + } + } + return err } func (m *master) stopReplication() error { @@ -115,60 +132,88 @@ func (m *master) startReplication(masterAddr string, restart bool) error { m.app.cfg.SetReadonly(true) + m.quit = make(chan struct{}, 1) + + if len(m.addr) == 0 { + return fmt.Errorf("no assign master addr") + } + m.wg.Add(1) go m.runReplication(restart) return nil } +func (m *master) isQuited() bool { + select { + case <-m.quit: + return true + default: + return false + } +} + func (m *master) runReplication(restart bool) { defer func() { m.state.Set(replConnectState) m.wg.Done() }() - m.state.Set(replConnectingState) - if err := m.resetConn(); err != nil { - log.Errorf("reset conn error %s", err.Error()) - return - } - for { - select { - case <-m.quit: + m.state.Set(replConnectState) + + if m.isQuited() { return - default: - if _, err := m.conn.Do("ping"); err != nil { - log.Errorf("ping master %s error %s, try 2s later", m.addr, err.Error()) - time.Sleep(2 * time.Second) - continue + } + + if err := m.checkConn(); err != nil { + 
log.Errorf("check master %s connection error %s, try 3s later", m.addr, err.Error()) + + select { + case <-time.After(3 * time.Second): + case <-m.quit: + return } + continue + } + + if m.isQuited() { + return } m.state.Set(replConnectedState) if err := m.replConf(); err != nil { - log.Errorf("replconf error %s", err.Error()) - return + if strings.Contains(err.Error(), ledis.ErrRplNotSupport.Error()) { + log.Fatalf("master doesn't support replication, wait 10s and retry") + select { + case <-time.After(10 * time.Second): + case <-m.quit: + return + } + } else { + log.Errorf("replconf error %s", err.Error()) + } + + continue } if restart { - m.state.Set(replSyncState) if err := m.fullSync(); err != nil { log.Errorf("restart fullsync error %s", err.Error()) - return + continue } + m.state.Set(replConnectedState) } for { - select { - case <-m.quit: + if err := m.sync(); err != nil { + log.Errorf("sync error %s", err.Error()) + break + } + m.state.Set(replConnectedState) + + if m.isQuited() { return - default: - m.state.Set(replConnectedState) - if err := m.sync(); err != nil { - log.Errorf("sync error %s", err.Error()) - return - } } } } @@ -198,6 +243,8 @@ func (m *master) fullSync() error { return err } + m.state.Set(replSyncState) + dumpPath := path.Join(m.app.cfg.DataDir, "master.dump") f, err := os.OpenFile(dumpPath, os.O_CREATE|os.O_WRONLY, 0644) if err != nil { @@ -245,20 +292,20 @@ func (m *master) sync() error { return err } + m.state.Set(replConnectedState) + m.syncBuf.Reset() if err = m.conn.ReceiveBulkTo(&m.syncBuf); err != nil { - switch err.Error() { - case ledis.ErrLogMissed.Error(): + if strings.Contains(err.Error(), ledis.ErrLogMissed.Error()) { return m.fullSync() - case ledis.ErrRplNotSupport.Error(): - m.stopReplication() - return nil - default: + } else { return err } } + m.state.Set(replConnectedState) + buf := m.syncBuf.Bytes() if len(buf) < 8 { @@ -276,7 +323,6 @@ func (m *master) sync() error { return nil } - m.state.Set(replSyncState) if err 
= m.app.ldb.StoreLogsFromData(buf); err != nil { return err } @@ -302,6 +348,7 @@ func (app *App) slaveof(masterAddr string, restart bool, readonly bool) error { app.cfg.SlaveOf = masterAddr if len(masterAddr) == 0 { + log.Infof("slaveof no one, stop replication") if err := app.m.stopReplication(); err != nil { return err } @@ -347,9 +394,7 @@ func (app *App) removeSlave(c *client, activeQuit bool) { if _, ok := app.slaves[addr]; ok { delete(app.slaves, addr) log.Infof("remove slave %s", addr) - if activeQuit { - asyncNotifyUint64(app.slaveSyncAck, c.lastLogID.Get()) - } + asyncNotifyUint64(app.slaveSyncAck, c.lastLogID.Get()) } } diff --git a/upgrade/ledis-upgrade-ttl/main.go b/upgrade/ledis-upgrade-ttl/main.go index 96d1a28..d7ddbed 100644 --- a/upgrade/ledis-upgrade-ttl/main.go +++ b/upgrade/ledis-upgrade-ttl/main.go @@ -45,7 +45,7 @@ func main() { wb := db.NewWriteBatch() - for i := uint8(0); i < ledis.MaxDBNumber; i++ { + for i := uint8(0); i < cfg.Databases; i++ { minK, maxK := oldKeyPair(i) it := db.RangeIterator(minK, maxK, store.RangeROpen)