Merge branch 'develop'

This commit is contained in:
siddontang 2015-03-04 15:48:29 +08:00
commit b5e3c033c1
68 changed files with 2417 additions and 2549 deletions

View File

@ -2,7 +2,7 @@
[![Build Status](https://travis-ci.org/siddontang/ledisdb.svg?branch=develop)](https://travis-ci.org/siddontang/ledisdb)
Ledisdb is a high-performance NoSQL database, similar to Redis, written in [Go](http://golang.org/). It supports many data structures including kv, list, hash, zset, bitmap, set.
Ledisdb is a high-performance NoSQL database, similar to Redis, written in [Go](http://golang.org/). It supports many data structures including kv, list, hash, zset, set.
LedisDB now supports multiple different databases as backends.
@ -11,7 +11,7 @@ LedisDB now supports multiple different databases as backends.
## Features
+ Rich data structure: KV, List, Hash, ZSet, Bitmap, Set.
+ Rich data structure: KV, List, Hash, ZSet, Set.
+ Data storage is not limited by RAM.
+ Various backends supported: LevelDB, goleveldb, LMDB, RocksDB, BoltDB, RAM.
+ Supports transactions using LMDB or BoltDB.

View File

@ -2,6 +2,7 @@ package ledis
import (
"container/list"
"net"
"strings"
"sync"
)
@ -46,11 +47,34 @@ func NewClient(cfg *Config) *Client {
}
func (c *Client) Do(cmd string, args ...interface{}) (interface{}, error) {
co := c.get()
r, err := co.Do(cmd, args...)
c.put(co)
return r, err
var co *Conn
var err error
var r interface{}
for i := 0; i < 2; i++ {
co, err = c.get()
if err != nil {
return nil, err
}
r, err = co.Do(cmd, args...)
if err != nil {
co.finalize()
if e, ok := err.(*net.OpError); ok && strings.Contains(e.Error(), "use of closed network connection") {
//send to a closed connection, try again
continue
}
return nil, err
} else {
c.put(co)
}
return r, nil
}
return nil, err
}
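For orientation, a minimal usage sketch of the pooled client after this change; the `Addr` and `MaxIdleConns` config fields and the address are assumptions, not part of this diff:

```go
package main

import ledis "github.com/siddontang/ledisdb/client/goledis"

func main() {
	cfg := new(ledis.Config)
	cfg.Addr = "127.0.0.1:6380" // hypothetical address
	cfg.MaxIdleConns = 4        // assumed field name

	c := ledis.NewClient(cfg)
	defer c.Close()

	// Do fetches a pooled connection, retries once if that connection was
	// already closed by the server, and returns the reply or the error.
	if _, err := c.Do("set", "k", "v"); err != nil {
		panic(err)
	}
}
```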
func (c *Client) Close() {
@ -66,11 +90,11 @@ func (c *Client) Close() {
}
}
func (c *Client) Get() *Conn {
func (c *Client) Get() (*Conn, error) {
return c.get()
}
func (c *Client) get() *Conn {
func (c *Client) get() (*Conn, error) {
c.Lock()
if c.conns.Len() == 0 {
c.Unlock()
@ -83,7 +107,7 @@ func (c *Client) get() *Conn {
c.Unlock()
return co
return co, nil
}
}

View File

@ -8,8 +8,6 @@ import (
"io"
"net"
"strconv"
"strings"
"sync"
"time"
)
@ -19,50 +17,37 @@ type Error string
func (err Error) Error() string { return string(err) }
type Conn struct {
cm sync.Mutex
wm sync.Mutex
rm sync.Mutex
closed bool
client *Client
addr string
c net.Conn
br *bufio.Reader
bw *bufio.Writer
rSize int
wSize int
// Scratch space for formatting argument length.
// '*' or '$', length, "\r\n"
lenScratch [32]byte
// Scratch space for formatting integers and floats.
numScratch [40]byte
connectTimeout time.Duration
}
func NewConn(addr string) *Conn {
co := new(Conn)
co.addr = addr
co.rSize = 4096
co.wSize = 4096
co.closed = false
return co
func Connect(addr string) (*Conn, error) {
return ConnectWithSize(addr, 4096, 4096)
}
func NewConnSize(addr string, readSize int, writeSize int) *Conn {
co := NewConn(addr)
co.rSize = readSize
co.wSize = writeSize
return co
func ConnectWithSize(addr string, readSize int, writeSize int) (*Conn, error) {
c := new(Conn)
var err error
c.c, err = net.Dial(getProto(addr), addr)
if err != nil {
return nil, err
}
c.br = bufio.NewReaderSize(c.c, readSize)
c.bw = bufio.NewWriterSize(c.c, writeSize)
return c, nil
}
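A brief sketch of the constructor-style API that replaces `NewConn`/`NewConnSize`, mirroring the ledis-cli change later in this commit; the address is hypothetical:

```go
package main

import ledis "github.com/siddontang/ledisdb/client/goledis"

func main() {
	// Connect/ConnectWithSize dial immediately and report the error up
	// front, instead of deferring the dial to the first command.
	c, err := ledis.ConnectWithSize("127.0.0.1:6380", 16*1024, 4096)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	if _, err := c.Do("ping"); err != nil {
		panic(err)
	}
}
```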
func (c *Conn) Close() {
@ -73,26 +58,12 @@ func (c *Conn) Close() {
}
}
func (c *Conn) SetConnectTimeout(t time.Duration) {
c.cm.Lock()
c.connectTimeout = t
c.cm.Unlock()
}
func (c *Conn) SetReadDeadline(t time.Time) {
c.cm.Lock()
if c.c != nil {
c.c.SetReadDeadline(t)
}
c.cm.Unlock()
c.c.SetReadDeadline(t)
}
func (c *Conn) SetWriteDeadline(t time.Time) {
c.cm.Lock()
if c.c != nil {
c.c.SetWriteDeadline(t)
}
c.cm.Unlock()
c.c.SetWriteDeadline(t)
}
func (c *Conn) Do(cmd string, args ...interface{}) (interface{}, error) {
@ -104,28 +75,6 @@ func (c *Conn) Do(cmd string, args ...interface{}) (interface{}, error) {
}
func (c *Conn) Send(cmd string, args ...interface{}) error {
var err error
for i := 0; i < 2; i++ {
if err = c.send(cmd, args...); err != nil {
if e, ok := err.(*net.OpError); ok && strings.Contains(e.Error(), "use of closed network connection") {
//send to a closed connection, try again
continue
}
} else {
return nil
}
}
return err
}
func (c *Conn) send(cmd string, args ...interface{}) error {
if err := c.connect(); err != nil {
return err
}
c.wm.Lock()
defer c.wm.Unlock()
if err := c.writeCommand(cmd, args); err != nil {
c.finalize()
return err
@ -139,9 +88,6 @@ func (c *Conn) send(cmd string, args ...interface{}) error {
}
func (c *Conn) Receive() (interface{}, error) {
c.rm.Lock()
defer c.rm.Unlock()
if reply, err := c.readReply(); err != nil {
c.finalize()
return nil, err
@ -155,9 +101,6 @@ func (c *Conn) Receive() (interface{}, error) {
}
func (c *Conn) ReceiveBulkTo(w io.Writer) error {
c.rm.Lock()
defer c.rm.Unlock()
err := c.readBulkReplyTo(w)
if err != nil {
if _, ok := err.(Error); !ok {
@ -168,44 +111,7 @@ func (c *Conn) ReceiveBulkTo(w io.Writer) error {
}
func (c *Conn) finalize() {
c.cm.Lock()
if !c.closed {
if c.c != nil {
c.c.Close()
}
c.closed = true
}
c.cm.Unlock()
}
func (c *Conn) connect() error {
c.cm.Lock()
defer c.cm.Unlock()
if !c.closed && c.c != nil {
return nil
}
var err error
c.c, err = net.DialTimeout(getProto(c.addr), c.addr, c.connectTimeout)
if err != nil {
c.c = nil
return err
}
if c.br != nil {
c.br.Reset(c.c)
} else {
c.br = bufio.NewReaderSize(c.c, c.rSize)
}
if c.bw != nil {
c.bw.Reset(c.c)
} else {
c.bw = bufio.NewWriterSize(c.c, c.wSize)
}
return nil
c.c.Close()
}
func (c *Conn) writeLen(prefix byte, n int) error {
@ -447,9 +353,12 @@ func (c *Conn) readReply() (interface{}, error) {
return nil, errors.New("ledis: unexpected response line")
}
func (c *Client) newConn(addr string) *Conn {
co := NewConnSize(addr, c.cfg.ReadBufferSize, c.cfg.WriteBufferSize)
func (c *Client) newConn(addr string) (*Conn, error) {
co, err := ConnectWithSize(addr, c.cfg.ReadBufferSize, c.cfg.WriteBufferSize)
if err != nil {
return nil, err
}
co.client = c
return co
return co, nil
}

View File

@ -3,7 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"math/rand"
"runtime"
"strings"
@ -38,7 +38,7 @@ func bench(cmd string, f func(c *ledis.Conn)) {
t1 := time.Now()
for i := 0; i < *clients; i++ {
go func() {
c := client.Get()
c, _ := client.Get()
for j := 0; j < loop; j++ {
f(c)
}
@ -277,7 +277,7 @@ func main() {
client = ledis.NewClient(cfg)
for i := 0; i < *clients; i++ {
c := client.Get()
c, _ := client.Get()
c.Close()
}

View File

@ -1,28 +1,16 @@
//This file was generated by .tools/generate_commands.py on Fri Feb 06 2015 09:15:18 +0800
//This file was generated by .tools/generate_commands.py on Wed Mar 04 2015 09:31:59 +0800
package main
var helpCommands = [][]string{
{"APPEND", "key value", "KV"},
{"BCOUNT", "key [start end]", "Bitmap"},
{"BDELETE", "key", "ZSet"},
{"BEGIN", "-", "Transaction"},
{"BEXPIRE", "key seconds", "Bitmap"},
{"BEXPIREAT", "key timestamp", "Bitmap"},
{"BGET", "key", "Bitmap"},
{"BGETBIT", "key offset", "Bitmap"},
{"BITCOUNT", "key [start] [end]", "KV"},
{"BITOP", "operation destkey key [key ...]", "KV"},
{"BITPOS", "key bit [start] [end]", "KV"},
{"BLPOP", "key [key ...] timeout", "List"},
{"BMSETBIT", "key offset value [offset value ...]", "Bitmap"},
{"BOPT", "operation destkey key [key ...]", "Bitmap"},
{"BPERSIST", "key", "Bitmap"},
{"BRPOP", "key [key ...] timeout", "List"},
{"BSETBIT", "key offset value", "Bitmap"},
{"BTTL", "key", "Bitmap"},
{"BXREVSCAN", "key [MATCH match] [COUNT count]", "Bitmap"},
{"BXSCAN", "key [MATCH match] [COUNT count]", "Bitmap"},
{"COMMIT", "-", "Transaction"},
{"CONFIG GET", "parameter", "Server"},
{"CONFIG REWRITE", "-", "Server"},
{"DECR", "key", "KV"},
{"DECRBY", "key decrement", "KV"},
@ -50,6 +38,7 @@ var helpCommands = [][]string{
{"HGET", "key field", "Hash"},
{"HGETALL", "key", "Hash"},
{"HINCRBY", "key field increment", "Hash"},
{"HKEYEXISTS", "key", "Hash"},
{"HKEYS", "key", "Hash"},
{"HLEN", "key", "Hash"},
{"HMCLEAR", "key [key ...]", "Hash"},
@ -59,8 +48,6 @@ var helpCommands = [][]string{
{"HSET", "key field value", "Hash"},
{"HTTL", "key", "Hash"},
{"HVALS", "key", "Hash"},
{"HXREVSCAN", "key [MATCH match] [COUNT count]", "Hash"},
{"HXSCAN", "key [MATCH match] [COUNT count]", "Hash"},
{"INCR", "key", "KV"},
{"INCRBY", "key increment", "KV"},
{"INFO", "[section]", "Server"},
@ -69,6 +56,7 @@ var helpCommands = [][]string{
{"LEXPIRE", "key seconds", "List"},
{"LEXPIREAT", "key timestamp", "List"},
{"LINDEX", "key index", "List"},
{"LKEYEXISTS", "key", "List"},
{"LLEN", "key", "List"},
{"LMCLEAR", "key [key ...]", "List"},
{"LPERSIST", "key", "List"},
@ -76,8 +64,6 @@ var helpCommands = [][]string{
{"LPUSH", "key value [value ...]", "List"},
{"LRANGE", "key start stop", "List"},
{"LTTL", "key", "List"},
{"LXREVSCAN", "key [MATCH match] [COUNT count]", "List"},
{"LXSCAN", "key [MATCH match] [COUNT count]", "List"},
{"MGET", "key [key ...]", "KV"},
{"MSET", "key value [key value ...]", "KV"},
{"PERSIST", "key", "KV"},
@ -107,6 +93,7 @@ var helpCommands = [][]string{
{"SINTER", "key [key ...]", "Set"},
{"SINTERSTORE", "destination key [key ...]", "Set"},
{"SISMEMBER", "key member", "Set"},
{"SKEYEXISTS", "key", "Set"},
{"SLAVEOF", "host port [RESTART] [READONLY]", "Replication"},
{"SMCLEAR", "key [key ...]", "Set"},
{"SMEMBERS", "key", "Set"},
@ -116,23 +103,13 @@ var helpCommands = [][]string{
{"STTL", "key", "Set"},
{"SUNION", "key [key ...]", "Set"},
{"SUNIONSTORE", "destination key [key ...]", "Set"},
{"SXREVSCAN", "key [MATCH match] [COUNT count]", "Set"},
{"SXSCAN", "key [MATCH match] [COUNT count]", "Set"},
{"SYNC", "logid", "Replication"},
{"TIME", "-", "Server"},
{"TTL", "key", "KV"},
{"XBREVSCAN", "key [MATCH match] [COUNT count]", "Bitmap"},
{"XBSCAN", "key [MATCH match] [COUNT count]", "Bitmap"},
{"XHREVSCAN", "key [MATCH match] [COUNT count]", "Hash"},
{"XHSCAN", "key [MATCH match] [COUNT count]", "Hash"},
{"XLREVSCAN", "key [MATCH match] [COUNT count]", "List"},
{"XLSCAN", "key [MATCH match] [COUNT count]", "List"},
{"XREVSCAN", "key [MATCH match] [COUNT count]", "KV"},
{"XSCAN", "key [MATCH match] [COUNT count]", "KV"},
{"XSREVSCAN", "key [MATCH match] [COUNT count]", "Set"},
{"XSSCAN", "key [MATCH match] [COUNT count]", "Set"},
{"XZREVSCAN", "key [MATCH match] [COUNT count]", "ZSet"},
{"XZSCAN", "key [MATCH match] [COUNT count]", "ZSet"},
{"XHSCAN", "key cursor [MATCH match] [COUNT count]", "Hash"},
{"XSCAN", "type cursor [MATCH match] [COUNT count]", "Server"},
{"XSSCAN", "key cursor [MATCH match] [COUNT count]", "Set"},
{"XZSCAN", "key cursor [MATCH match] [COUNT count]", "ZSet"},
{"ZADD", "key score member [score member ...]", "ZSet"},
{"ZCARD", "key", "ZSet"},
{"ZCLEAR", "key", "ZSet"},
@ -142,6 +119,7 @@ var helpCommands = [][]string{
{"ZEXPIREAT", "key timestamp", "ZSet"},
{"ZINCRBY", "key increment member", "ZSet"},
{"ZINTERSTORE", "destkey numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]", "ZSet"},
{"ZKEYEXISTS", "ZSet", "Hash"},
{"ZLEXCOUNT", "key min max", "ZSet"},
{"ZMCLEAR", "key [key ...]", "ZSet"},
{"ZPERSIST", "key", "ZSet"},
@ -159,6 +137,4 @@ var helpCommands = [][]string{
{"ZSCORE", "key member", "ZSet"},
{"ZTTL", "key", "ZSet"},
{"ZUNIONSTORE", "destkey numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]", "ZSet"},
{"ZXREVSCAN", "key [MATCH match] [COUNT count]", "ZSet"},
{"ZXSCAN", "key [MATCH match] [COUNT count]", "ZSet"},
}

View File

@ -3,7 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"regexp"
"strconv"
"strings"

View File

@ -3,7 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"os"
)
@ -32,7 +32,11 @@ func main() {
addr = fmt.Sprintf("%s:%d", *host, *port)
}
c := ledis.NewConnSize(addr, 16*1024, 4096)
c, err := ledis.ConnectWithSize(addr, 16*1024, 4096)
if err != nil {
println(err.Error())
return
}
defer c.Close()

View File

@ -11,6 +11,7 @@ import (
"os"
"os/signal"
"runtime"
"strings"
"syscall"
)
@ -64,7 +65,16 @@ func main() {
cfg.UseReplication = true
} else {
cfg.Readonly = *readonly
cfg.UseReplication = *rpl
// if rpl in command flag, use it.
for _, arg := range os.Args {
arg := strings.ToLower(arg)
if arg == "-rpl" || arg == "-rpl=true" || arg == "-rpl=false" {
cfg.UseReplication = *rpl
break
}
}
cfg.Replication.Sync = *rplSync
}
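As a side note, a sketch of an alternative to rescanning `os.Args` (not what this commit does): the standard `flag` package can report which flags were explicitly set via `flag.Visit`, called after `flag.Parse()`. The fragment below would slot in place of the loop above:

```go
// flag.Visit only visits flags that were set on the command line, so this
// distinguishes "-rpl=false was given" from "-rpl was not given at all".
rplSet := false
flag.Visit(func(f *flag.Flag) {
	if f.Name == "rpl" {
		rplSet = true
	}
})
if rplSet {
	cfg.UseReplication = *rpl
}
```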

View File

@ -102,6 +102,8 @@ type Config struct {
DataDir string `toml:"data_dir"`
Databases uint8 `toml:"databases"`
DBName string `toml:"db_name"`
DBPath string `toml:"db_path"`
DBSyncCommit int `toml:"db_sync_commit"`
@ -165,6 +167,9 @@ func NewConfigDefault() *Config {
cfg.SlaveOf = ""
cfg.Readonly = false
// default databases number
cfg.Databases = 16
// disable access log
cfg.AccessLog = ""
@ -209,7 +214,6 @@ func (cfg *Config) adjust() {
cfg.ConnReadBufferSize = getDefault(4*KB, cfg.ConnReadBufferSize)
cfg.ConnWriteBufferSize = getDefault(4*KB, cfg.ConnWriteBufferSize)
cfg.TTLCheckInterval = getDefault(1, cfg.TTLCheckInterval)
}
func (cfg *LevelDBConfig) adjust() {

View File

@ -9,6 +9,11 @@ http_addr = "127.0.0.1:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"
# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# Maximum databases is 256.
databases = 16
# Log server command, set empty to disable
access_log = ""

View File

@ -5,10 +5,6 @@ so you can easily write your own LedisDB client based on a Redis one.
Before you write a client, you must know some differences between LedisDB and Redis.
## Data Structure
LedisDB has no Strings data type but KV and Bitmap; some Keys and Strings commands in Redis will only affect KV data, and "bit" commands affect Bitmap.
## Del
In Redis, `del` can delete data of any type, like String, Hash, List, etc., but in LedisDB, `del` can only delete KV data. To delete data of other types, you must use the "clear" commands (see the sketch after this list).
@ -17,8 +13,7 @@ In Redis, `del` can delete all type data, like String, Hash, List, etc, but in L
+ Hash: `hclear`, `hmclear`
+ List: `lclear`, `lmclear`
+ Set: `sclear`, `smclear`
+ Zset: `zclear`, `zmclear`
+ Bitmap: `bclear`, `bmclear`
+ ZSet: `zclear`, `zmclear`
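A minimal sketch (not part of the original doc) of calling the clear commands through the Go client in this repository; the address and key names are hypothetical:

```go
package main

import ledis "github.com/siddontang/ledisdb/client/goledis"

func main() {
	c, err := ledis.Connect("127.0.0.1:6380")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	c.Do("hset", "myhash", "f", "v")
	c.Do("del", "myhash")    // only removes KV data; the hash is untouched
	c.Do("hclear", "myhash") // removes the hash itself
}
```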
## Expire, Persist, and TTL
@ -29,7 +24,6 @@ The same for Del.
+ List: `lexpire`, `lpersist`, `lttl`
+ Set: `sexpire`, `spersist`, `sttl`
+ Zset: `zexpire`, `zpersist`, `zttl`
+ Bitmap: `bexpire`, `bpersist`, `bttl`
## ZSet
@ -47,14 +41,14 @@ Transaction API:
## Scan
LedisDB supplies `xscan`, `xrevscan`, etc, to fetch data iteratively and reverse iteratively.
LedisDB supplies `xscan`, `xhscan`, `xsscan`, and `xzscan` to fetch data iteratively (see the sketch after the syntax list below).
+ KV: `xscan`, `xrevscan`
+ Hash: `hxscan`, `hxrevscan`, `xhscan`, `xhrevscan`
+ List: `lxscan`, `lxrevscan`, `xlscan`, `xlrevscan`
+ Set: `sxscan` , `sxrevscan`, `xsscan`, `xsrevscan`
+ Zset: `zxscan`, `zxrevscan`, `xzscan`, `xzrevscan`
+ Bitmap: `bxscan`, `bxrevscan`, `xbscan`, `xbrevscan`
```
XSCAN type cursor [MATCH match] [COUNT count]
XHSCAN key cursor [MATCH match] [COUNT count]
XSSCAN key cursor [MATCH match] [COUNT count]
XZSCAN key cursor [MATCH match] [COUNT count]
```
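A hedged sketch of driving the cursor loop from the Go client in this repository; the reply shapes are assumed to follow the goledis conventions (arrays as `[]interface{}`, bulk strings as `[]byte`), and the address is hypothetical:

```go
package main

import ledis "github.com/siddontang/ledisdb/client/goledis"

func main() {
	c, err := ledis.Connect("127.0.0.1:6380")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// XSCAN replies with [next-cursor, [key ...]]; an empty cursor ends the scan.
	cursor := ""
	for {
		reply, err := c.Do("XSCAN", "KV", cursor, "COUNT", 10)
		if err != nil {
			panic(err)
		}
		pair := reply.([]interface{})
		cursor = string(pair[0].([]byte))
		for _, k := range pair[1].([]interface{}) {
			_ = k.([]byte) // one KV key per element
		}
		if cursor == "" {
			break
		}
	}
}
```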
## DUMP

View File

@ -1,59 +1,4 @@
{
"BCOUNT": {
"arguments": "key [start end]",
"group": "Bitmap",
"readonly": true
},
"BDELETE": {
"arguments": "key",
"group": "ZSet",
"readonly": false
},
"BEXPIRE": {
"arguments": "key seconds",
"group": "Bitmap",
"readonly": false
},
"BEXPIREAT": {
"arguments": "key timestamp",
"group": "Bitmap",
"readonly": false
},
"BGET": {
"arguments": "key",
"group": "Bitmap",
"readonly": true
},
"BGETBIT": {
"arguments": "key offset",
"group": "Bitmap",
"readonly": true
},
"BMSETBIT": {
"arguments": "key offset value [offset value ...]",
"group": "Bitmap",
"readonly": false
},
"BOPT": {
"arguments": "operation destkey key [key ...]",
"group": "Bitmap",
"readonly": false
},
"BPERSIST": {
"arguments": "key",
"group": "Bitmap",
"readonly": false
},
"BSETBIT": {
"arguments": "key offset value",
"group": "Bitmap",
"readonly": false
},
"BTTL": {
"arguments": "key",
"group": "Bitmap",
"readonly": true
},
"DECR": {
"arguments": "key",
"group": "KV",
@ -560,79 +505,7 @@
"group": "Transaction",
"readonly": false
},
"XSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "KV",
"readonly": true
},
"HXSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Hash",
"readonly": true
},
"LXSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "List",
"readonly": true
},
"SXSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Set",
"readonly": true
},
"ZXSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "ZSet",
"readonly": true
},
"BXSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Bitmap",
"readonly": true
},
"XREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "KV",
"readonly": true
},
"HXREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Hash",
"readonly": true
},
"LXREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "List",
"readonly": true
},
"SXREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Set",
"readonly": true
},
"ZXREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "ZSet",
"readonly": true
},
"BXREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Bitmap",
"readonly": true
},
"FLUSHALL": {
"arguments": "-",
"group": "Server",
@ -693,6 +566,12 @@
"readonly": false
},
"CONFIG GET": {
"arguments" : "parameter",
"group": "Server",
"readonly": true
},
"DUMP": {
"arguments" : "key",
"group": "KV",
@ -724,66 +603,30 @@
"readonly": true
},
"XBSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Bitmap",
"readonly": true
},
"XLSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "List",
"XSCAN": {
"arguments": "type cursor [MATCH match] [COUNT count]",
"group": "Server",
"readonly": true
},
"XHSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"arguments": "key cursor [MATCH match] [COUNT count]",
"group": "Hash",
"readonly": true
},
"XSSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"arguments": "key cursor [MATCH match] [COUNT count]",
"group": "Set",
"readonly": true
},
"XZSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"arguments": "key cursor [MATCH match] [COUNT count]",
"group": "ZSet",
"readonly": true
},
"XHREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Hash",
"readonly": true
},
"XLREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "List",
"readonly": true
},
"XSREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Set",
"readonly": true
},
"XZREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "ZSet",
"readonly": true
},
"XBREVSCAN": {
"arguments": "key [MATCH match] [COUNT count]",
"group": "Bitmap",
"readonly": true
},
"RESTORE": {
"arguments" : "key ttl value",
"group" : "Server",
@ -848,5 +691,29 @@
"arguments" : "key offset value",
"group" : "KV",
"readonly" : false
},
"HKEYEXISTS": {
"arguments" : "key",
"group" : "Hash",
"readonly" : true
},
"LKEYEXISTS": {
"arguments" : "key",
"group" : "List",
"readonly" : true
},
"SKEYEXISTS": {
"arguments" : "key",
"group" : "Set",
"readonly" : true
},
"ZKEYEXISTS": {
"arguments" : "ZSet",
"group" : "Hash",
"readonly" : true
}
}

View File

@ -1,175 +1,161 @@
## Summary
ledisdb use redis protocol called RESP(REdis Serialization Protocol), [here](http://redis.io/topics/protocol).
Ledisdb uses the Redis protocol called RESP (REdis Serialization Protocol), described [here](http://redis.io/topics/protocol).
ledisdb all commands return RESP format and it will use `int64` instead of `RESP integer`, `string` instead of `RESP simple string`, `bulk string` instead of `RESP bulk string`, and `array` instead of `RESP arrays` below.
All Ledisdb commands return the RESP format; below, `int64` is used instead of `RESP integer`, `string` instead of `RESP simple string`, `bulk string` instead of `RESP bulk string`, and `array` instead of `RESP arrays`.
Table of Contents
=================
Most of Ledisdb's commands are the same as Redis's; see the Redis command documentation for more detail as well.
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Commands List**
- [Summary](#summary)
- [KV](#kv)
- [DECR key](#decr-key)
- [DECRBY key decrement](#decrby-key-decrement)
- [DEL key [key ...]](#del-key-key-)
- [EXISTS key](#exists-key)
- [GET key](#get-key)
- [GETSET key value](#getset-key-value)
- [INCR key](#incr-key)
- [INCRBY key increment](#incrby-key-increment)
- [MGET key [key ...]](#mget-key-key-)
- [MSET key value [key value ...]](#mset-key-value-key-value-)
- [SET key value](#set-key-value)
- [SETNX key value](#setnx-key-value)
- [SETEX key seconds value](#setex-key-seconds-value)
- [EXPIRE key seconds](#expire-key-seconds)
- [EXPIREAT key timestamp](#expireat-key-timestamp)
- [TTL key](#ttl-key)
- [PERSIST key](#persist-key)
- [XSCAN key [MATCH match] [COUNT count]](#xscan-key-match-match-count-count)
- [XREVSCAN key [MATCH match] [COUNT count]](#xrevscan-key-match-match-count-count)
- [DUMP key](#dump-key)
- [DECR key](#decr-key)
- [DECRBY key decrement](#decrby-key-decrement)
- [DEL key [key ...]](#del-key-key-)
- [EXISTS key](#exists-key)
- [GET key](#get-key)
- [GETSET key value](#getset-key-value)
- [INCR key](#incr-key)
- [INCRBY key increment](#incrby-key-increment)
- [MGET key [key ...]](#mget-key-key-)
- [MSET key value [key value ...]](#mset-key-value-key-value-)
- [SET key value](#set-key-value)
- [SETNX key value](#setnx-key-value)
- [SETEX key seconds value](#setex-key-seconds-value)
- [EXPIRE key seconds](#expire-key-seconds)
- [EXPIREAT key timestamp](#expireat-key-timestamp)
- [TTL key](#ttl-key)
- [PERSIST key](#persist-key)
- [DUMP key](#dump-key)
- [APPEND key value](#append-key-value)
- [GETRANGE key start end](#getrange-key-start-end)
- [SETRANGE key offset value](#setrange-key-offset-value)
- [STRLEN key](#strlen-key)
- [BITCOUNT key [start] [end]](#bitcount-key-start-end)
- [BITOP operation destkey key [key ...]](#bitop-operation-destkey-key-key-)
- [BITPOS key bit [start] [end]](#bitpos-key-bit-start-end)
- [GETBIT key offset](#getbit-key-offset)
- [SETBIT key offset value](#setbit-key-offset-value)
- [Hash](#hash)
- [HDEL key field [field ...]](#hdel-key-field-field-)
- [HEXISTS key field](#hexists-key-field)
- [HGET key field](#hget-key-field)
- [HGETALL key](#hgetall-key)
- [HINCRBY key field increment](#hincrby-key-field-increment)
- [HKEYS key](#hkeys-key)
- [HLEN key](#hlen-key)
- [HMGET key field [field ...]](#hmget-key-field-field-)
- [HMSET key field value [field value ...]](#hmset-key-field-value-field-value-)
- [HSET key field value](#hset-key-field-value)
- [HVALS key](#hvals-key)
- [HCLEAR key](#hclear-key)
- [HMCLEAR key [key ...]](#hmclear-key-key)
- [HEXPIRE key seconds](#hexpire-key-seconds)
- [HEXPIREAT key timestamp](#hexpireat-key-timestamp)
- [HTTL key](#httl-key)
- [HPERSIST key](#hpersist-key)
- [HXSCAN key [MATCH match] [COUNT count]](#hxscan-key-match-match-count-count)
- [HXREVSCAN key [MATCH match] [COUNT count]](#hxrevscan-key-match-match-count-count)
- [XHSCAN key [MATCH match] [COUNT count]](#xhscan-key-match-match-count-count)
- [XHREVSCAN key [MATCH match] [COUNT count]](#xhrevscan-key-match-match-count-count)
- [HDUMP key](#hdump-key)
- [HDEL key field [field ...]](#hdel-key-field-field-)
- [HEXISTS key field](#hexists-key-field)
- [HGET key field](#hget-key-field)
- [HGETALL key](#hgetall-key)
- [HINCRBY key field increment](#hincrby-key-field-increment)
- [HKEYS key](#hkeys-key)
- [HLEN key](#hlen-key)
- [HMGET key field [field ...]](#hmget-key-field-field-)
- [HMSET key field value [field value ...]](#hmset-key-field-value-field-value-)
- [HSET key field value](#hset-key-field-value)
- [HVALS key](#hvals-key)
- [HCLEAR key](#hclear-key)
- [HMCLEAR key [key...]](#hmclear-key-key)
- [HEXPIRE key seconds](#hexpire-key-seconds)
- [HEXPIREAT key timestamp](#hexpireat-key-timestamp)
- [HTTL key](#httl-key)
- [HPERSIST key](#hpersist-key)
- [HDUMP key](#hdump-key)
- [HKEYEXISTS key](#hkeyexists-key)
- [List](#list)
- [BLPOP key [key ...] timeout](#blpop-key-key--timeout)
- [BRPOP key [key ...] timeout](#brpop-key-key--timeout)
- [LINDEX key index](#lindex-key-index)
- [LLEN key](#llen-key)
- [LPOP key](#lpop-key)
- [LRANGE key start stop](#lrange-key-start-stop)
- [LPUSH key value [value ...]](#lpush-key-value-value-)
- [RPOP key](#rpop-key)
- [RPUSH key value [value ...]](#rpush-key-value-value-)
- [LCLEAR key](#lclear-key)
- [LMCLEAR key [key...]](#lmclear-key-key-)
- [LEXPIRE key seconds](#lexpire-key-seconds)
- [LEXPIREAT key timestamp](#lexpireat-key-timestamp)
- [LTTL key](#lttl-key)
- [LPERSIST key](#lpersist-key)
- [LXSCAN key [MATCH match] [COUNT count]](#lxscan-key-match-match-count-count)
- [LXREVSCAN key [MATCH match] [COUNT count]](#lxrevscan-key-match-match-count-count)
- [XLSCAN key [MATCH match] [COUNT count]](#xlscan-key-match-match-count-count)
- [XLREVSCAN key [MATCH match] [COUNT count]](#xlrevscan-key-match-match-count-count)
- [LDUMP key](#ldump-key)
- [BLPOP key [key ...] timeout](#blpop-key-key--timeout)
- [BRPOP key [key ...] timeout](#brpop-key-key--timeout)
- [LINDEX key index](#lindex-key-index)
- [LLEN key](#llen-key)
- [LPOP key](#lpop-key)
- [LRANGE key start stop](#lrange-key-start-stop)
- [LPUSH key value [value ...]](#lpush-key-value-value-)
- [RPOP key](#rpop-key)
- [RPUSH key value [value ...]](#rpush-key-value-value-)
- [LCLEAR key](#lclear-key)
- [LMCLEAR key [key ...]](#lmclear-key-key-)
- [LEXPIRE key seconds](#lexpire-key-seconds)
- [LEXPIREAT key timestamp](#lexpireat-key-timestamp)
- [LTTL key](#lttl-key)
- [LPERSIST key](#lpersist-key)
- [LDUMP key](#ldump-key)
- [LKEYEXISTS key](#lkeyexists-key)
- [Set](#set)
- [SADD key member [member ...]](#sadd-key-member-member-)
- [SCARD key](#scard-key)
- [SDIFF key [key ...]](#sdiff-key-key-)
- [SDIFFSTORE destination key [key ...]](#sdiffstore-destination-key-key-)
- [SINTER key [key ...]](#sinter-key-key-)
- [SINTERSTORE destination key [key ...]](#sinterstore-destination-key-key-)
- [SISMEMBER key member](#sismember-key-member)
- [SMEMBERS key](#smembers-key)
- [SREM key member [member ...]](#srem-key-member-member-)
- [SUNION key [key ...]](#sunion-key-key-)
- [SUNIONSTORE destination key [key ...]](#sunionstore-destination-key-key-)
- [SCLEAR key](#sclear-key)
- [SMCLEAR key [key...]](#smclear-key-key)
- [SEXPIRE key seconds](#sexpire-key-seconds)
- [SEXPIREAT key timestamp](#sexpireat-key-timestamp)
- [STTL key](#sttl-key)
- [SPERSIST key](#spersist-key)
- [SXSCAN key [MATCH match] [COUNT count]](#sxscan-key-match-match-count-count)
- [SXREVSCAN key [MATCH match] [COUNT count]](#sxrevscan-key-match-match-count-count)
- [XSSCAN key [MATCH match] [COUNT count]](#xsscan-key-match-match-count-count)
- [XSREVSCAN key [MATCH match] [COUNT count]](#xsrevscan-key-match-match-count-count)
- [SDUMP key](#sdump-key)
- [SADD key member [member ...]](#sadd-key-member-member-)
- [SCARD key](#scard-key)
- [SDIFF key [key ...]](#sdiff-key-key-)
- [SDIFFSTORE destination key [key ...]](#sdiffstore-destination-key-key-)
- [SINTER key [key ...]](#sinter-key-key-)
- [SINTERSTORE destination key [key ...]](#sinterstore--destination-key-key-)
- [SISMEMBER key member](#sismember--key-member)
- [SMEMBERS key](#smembers-key)
- [SREM key member [member ...]](#srem--key-member-member-)
- [SUNION key [key ...]](#sunion-key-key-)
- [SUNIONSTORE destination key [key]](#sunionstore-destination-key-key)
- [SCLEAR key](#sclear-key)
- [SMCLEAR key [key ...]](#smclear-key-key-)
- [SEXPIRE key seconds](#sexpire-key-seconds)
- [SEXPIREAT key timestamp](#sexpireat-key-timestamp)
- [STTL key](#sttl-key)
- [SPERSIST key](#spersist-key)
- [SDUMP key](#sdump-key)
- [SKEYEXISTS key](#skeyexists-key)
- [ZSet](#zset)
- [ZADD key score member [score member ...]](#zadd-key-score-member-score-member-)
- [ZCARD key](#zcard-key)
- [ZCOUNT key min max](#zcount-key-min-max)
- [ZINCRBY key increment member](#zincrby-key-increment-member)
- [ZRANGE key start stop [WITHSCORES]](#zrange-key-start-stop-withscores)
- [ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]](#zrangebyscore-key-min-max-withscores-limit-offset-count)
- [ZRANK key member](#zrank-key-member)
- [ZREM key member [member ...]](#zrem-key-member-member-)
- [ZREMRANGEBYRANK key start stop](#zremrangebyrank-key-start-stop)
- [ZREMRANGEBYSCORE key min max](#zremrangebyscore-key-min-max)
- [ZREVRANGE key start stop [WITHSCORES]](#zrevrange-key-start-stop-withscores)
- [ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count]](#zrevrangebyscore-key-max-min-withscores-limit-offset-count)
- [ZREVRANK key member](#zrevrank-key-member)
- [ZSCORE key member](#zscore-key-member)
- [ZCLEAR key](#zclear-key)
- [ZMCLEAR key [key ...]](#zmclear-key-key-)
- [ZEXPIRE key seconds](#zexpire-key-seconds)
- [ZEXPIREAT key timestamp](#zexpireat-key-timestamp)
- [ZTTL key](#zttl-key)
- [ZPERSIST key](#zpersist-key)
- [ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]
](#zunionstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax)
- [ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]
](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax)
- [ZXSCAN key [MATCH match] [COUNT count]](#zxscan-key-match-match-count-count)
- [ZXREVSCAN key [MATCH match] [COUNT count]](#zxrevscan-key-match-match-count-count)
- [XZSCAN key [MATCH match] [COUNT count]](#xzscan-key-match-match-count-count)
- [XZREVSCAN key [MATCH match] [COUNT count]](#xzrevscan-key-match-match-count-count)
- [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count)
- [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max)
- [ZLEXCOUNT key min max](#zlexcount-key-min-max)
- [ZDUMP key](#zdump-key)
- [Bitmap](#bitmap)
- [BGET key](#bget-key)
- [BGETBIT key offset](#bgetbit-key-offset)
- [BSETBIT key offset value](#bsetbit-key-offset-value)
- [BMSETBIT key offset value[offset value ...]](#bmsetbit-key-offset-value-offset-value-)
- [BOPT operation destkey key [key ...]](#bopt-operation-destkey-key-key-)
- [BCOUNT key [start, end]](#bcount-key-start-end)
- [BEXPIRE key seconds](#bexpire-key-seconds)
- [BEXPIREAT key timestamp](#bexpireat-key-timestamp)
- [BTTL key](#bttl-key)
- [BPERSIST key](#bpersist-key)
- [BXSCAN key [MATCH match] [COUNT count]](#bxscan-key-match-match-count-count)
- [BXREVSCAN key [MATCH match] [COUNT count]](#bxrevscan-key-match-match-count-count)
- [XBSCAN key [MATCH match] [COUNT count]](#xbscan-key-match-match-count-count)
- [XBREVSCAN key [MATCH match] [COUNT count]](#xbrevscan-key-match-match-count-count)
- [ZADD key score member [score member ...]](#zadd-key-score-member-score-member-)
- [ZCARD key](#zcard-key)
- [ZCOUNT key min max](#zcount-key-min-max)
- [ZINCRBY key increment member](#zincrby-key-increment-member)
- [ZRANGE key start stop [WITHSCORES]](#zrange-key-start-stop-withscores)
- [ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]](#zrangebyscore-key-min-max-withscores-limit-offset-count)
- [ZRANK key member](#zrank-key-member)
- [ZREM key member [member ...]](#zrem-key-member-member-)
- [ZREMRANGEBYRANK key start stop](#zremrangebyrank-key-start-stop)
- [ZREMRANGEBYSCORE key min max](#zremrangebyscore-key-min-max)
- [ZREVRANGE key start stop [WITHSCORES]](#zrevrange-key-start-stop-withscores)
- [ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count]](#zrevrangebyscore--key-max-min-withscores-limit-offset-count)
- [ZREVRANK key member](#zrevrank-key-member)
- [ZSCORE key member](#zscore-key-member)
- [ZCLEAR key](#zclear-key)
- [ZMCLEAR key [key ...]](#zmclear-key-key-)
- [ZEXPIRE key seconds](#zexpire-key-seconds)
- [ZEXPIREAT key timestamp](#zexpireat-key-timestamp)
- [ZTTL key](#zttl-key)
- [ZPERSIST key](#zpersist-key)
- [ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]](#zunionstore-destination-numkeys-key-key--weights-weight-weight--aggregate-sum|min|max)
- [ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-sum|min|max)
- [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count)
- [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max)
- [ZLEXCOUNT key min max](#zlexcount-key-min-max)
- [ZDUMP key](#zdump-key)
- [ZKEYEXISTS key](#zkeyexists-key)
- [Scan](#scan)
- [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count)
- [XHSCAN key cursor [MATCH match] [COUNT count]](#xhscan-key-cursor-match-match-count-count)
- [XSSCAN key cursor [MATCH match] [COUNT count]](#xsscan-key-cursor-match-match-count-count)
- [XZSCAN key cursor [MATCH match] [COUNT count]](#xzscan-key-cursor-match-match-count-count)
- [Replication](#replication)
- [SLAVEOF host port [RESTART] [READONLY]](#slaveof-host-port-restart-readonly)
- [FULLSYNC [NEW]](#fullsync-new)
- [SYNC logid](#sync-logid)
- [SLAVEOF host port [RESTART] [READONLY]](#slaveof-host-port-restart-readonly)
- [FULLSYNC [NEW]](#fullsync-new)
- [SYNC logid](#sync-logid)
- [Server](#server)
- [PING](#ping)
- [ECHO message](#echo-message)
- [SELECT index](#select-index)
- [FLUSHALL](#flushall)
- [FLUSHDB](#flushdb)
- [INFO [section]](#info-section)
- [TIME](#time)
- [CONFIG REWRITE](#config-rewrite)
- [RESTORE key ttl value](#restore-key-ttl-value)
- [ROLE](#role)
- [PING](#ping)
- [ECHO message](#echo-message)
- [SELECT index](#select-index)
- [FLUSHALL](#flushall)
- [FLUSHDB](#flushdb)
- [INFO [section]](#info-section)
- [TIME](#time)
- [CONFIG REWRITE](#config-rewrite)
- [RESTORE key ttl value](#restore-key-ttl-value)
- [ROLE](#role)
- [Transaction](#transaction)
- [BEGIN](#begin)
- [ROLLBACK](#rollback)
- [COMMIT](#commit)
- [BEGIN](#begin)
- [ROLLBACK](#rollback)
- [COMMIT](#commit)
- [Script](#script)
- [EVAL script numkeys key [key ...] arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-)
- [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-)
- [SCRIPT LOAD script](#script-load-script)
- [SCRIPT EXISTS script [script ...]](#script-exists-script-script-)
- [SCRIPT FLUSH](#script-flush)
- [EVAL script numkeys key [key ...] arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-)
- [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-)
- [SCRIPT LOAD script](#script-load-script)
- [SCRIPT EXISTS script [script ...]](#script-exists-script-script-)
- [SCRIPT FLUSH](#script-flush)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## KV
@ -524,82 +510,6 @@ ledis> TTL mykey
(integer) -1
```
### XSCAN key [MATCH match] [COUNT count]
Iterate KV keys incrementally.
Key is the start for the current iteration.
Match is the regexp for checking matched keys.
Count is the maximum number of retrieved elements; default is 10.
**Return value**
an array of two values, first value is the key for next iteration, second value is an array of elements.
**Examples**
```
ledis>set a 1
OK
ledis>set b 2
OK
ledis>set c 3
OK
127.0.0.1:6380>xscan ""
1) ""
2) ["a" "b" "c"]
ledis>xscan "" count 1
1) "a"
2) ["a"]
ledis>xscan "a" count 1
1) "b"
2) ["b"]
ledis>xscan "b" count 1
1) "c"
2) ["c"]
ledis>xscan "c" count 1
1) ""
2) []
```
### XREVSCAN key [MATCH match] [COUNT count]
Reverse iterate KV keys incrementally.
Key is the start for the current iteration.
Match is the regexp for checking matched keys.
Count is the maximum number of retrieved elements; default is 10.
**Return value**
an array of two values, first value is the key for next iteration, second value is an array of elements.
**Examples**
```
ledis>set a 1
OK
ledis>set b 2
OK
ledis>set c 3
OK
127.0.0.1:6380>xrevscan ""
1) ""
2) ["c" "b" "a"]
ledis>xrevscan "" count 1
1) "c"
2) ["c"]
ledis>xrevscan "c" count 1
1) "b"
2) ["b"]
ledis>xrevscan "b" count 1
1) "a"
2) ["a"]
ledis>xrevscan "a" count 1
1) ""
2) []
```
### DUMP key
Serialize the value stored at key with KV type in a Redis-specific format like RDB and return it to the user. The returned value can be synthesized back into a key using the RESTORE command.
@ -617,6 +527,25 @@ ledis>DUMP mykey
"\x00\xc0\n\x06\x00\xf8r?\xc5\xfb\xfb_("
```
### APPEND key value
### GETRANGE key start end
### SETRANGE key offset value
### STRLEN key
### BITCOUNT key [start] [end]
### BITOP operation destkey key [key ...]
### BITPOS key bit [start] [end]
### GETBIT key offset
### SETBIT key offset value
## Hash
### HDEL key field [field ...]
@ -980,35 +909,14 @@ ledis> HPERSIST not_exists_key
(integer) 0
```
### HXSCAN key [MATCH match] [COUNT count]
Iterate Hash keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### HXREVSCAN key [MATCH match] [COUNT count]
Reverse iterate Hash keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### XHSCAN key [MATCH match] [COUNT count]
Iterate Hash keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### XHREVSCAN key [MATCH match] [COUNT count]
Reverse iterate Hash keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### HDUMP key
See [DUMP](#dump-key) for more information.
### HKEYEXISTS key
Check whether the key exists for hash data, like [EXISTS key](#exists-key)
## List
### BLPOP key [key ...] timeout
@ -1331,35 +1239,13 @@ ledis> LPERSIST b
(integer) 0
```
### LXSCAN key [MATCH match] [COUNT count]
Iterate list keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### LXREVSCAN key [MATCH match] [COUNT count]
Reverse iterate list keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### XLSCAN key [MATCH match] [COUNT count]
Iterate list keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### XLREVSCAN key [MATCH match] [COUNT count]
Reverse iterate list keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### LDUMP key
See [DUMP](#dump-key) for more information.
### LKEYEXISTS key
Check whether the key exists for list data, like [EXISTS key](#exists-key)
## Set
@ -1782,37 +1668,14 @@ ledis> STTL key
(integer) -1
```
### SXSCAN key [MATCH match] [COUNT count]
Iterate Set keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### SXREVSCAN key [MATCH match] [COUNT count]
Reverse iterate Set keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### XSSCAN key [MATCH match] [COUNT count]
Iterate Set keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### XSREVSCAN key [MATCH match] [COUNT count]
Reverse iterate Set keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### SDUMP key
See [DUMP](#dump-key) for more information.
### SKEYEXISTS key
Check whether the key exists for set data, like [EXISTS key](#exists-key)
## ZSet
### ZADD key score member [score member ...]
@ -2432,30 +2295,6 @@ ledis> ZRANGE out 0 -1 WITHSCORES
4) "10"
```
### ZXSCAN key [MATCH match] [COUNT count]
Iterate ZSet keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### ZXREVSCAN key [MATCH match] [COUNT count]
Reverse iterate ZSet keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### XZSCAN key [MATCH match] [COUNT count]
Iterate ZSet keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### XZREVSCAN key [MATCH match] [COUNT count]
Reverse iterate ZSet keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### ZRANGEBYLEX key min max [LIMIT offset count]
When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max.
@ -2526,187 +2365,64 @@ ledis> ZLEXCOUNT myzset - [c
See [DUMP](#dump-key) for more information.
## Bitmap
### BGET key
Returns the whole binary data stored at `key`.
**Return value**
bulk: the raw value of key, or nil when key does not exist.
**Examples**
```
ledis> BMSETBIT flag 0 1 5 1 6 1
(integer) 3
ledis> BGET flag
a
```
### BGETBIT key offset
Returns the bit value at `offset` in the string value stored at `key`.
When *offset* is beyond the data length, or the target data does not exist, the bit value is always 0.
**Return value**
int64 : the bit value stored at offset.
**Examples**
```
ledis> BSETBIT flag 1024 1
(integer) 0
ledis> BGETBIT flag 0
(integer) 0
ledis> BGETBIT flag 1024
(integer) 1
ledis> BGETBIT flag 65535
(integer) 0
```
### BSETBIT key offset value
Sets or clears the bit at `offset` in the binary data stored at `key`.
The bit is either set or cleared depending on `value`, which can be either `0` or `1`.
The *offset* argument is required to be equal to or greater than 0, and smaller than 2^23 (this means the bitmap is limited to 8MB).
**Return value**
int64 : the original bit value stored at offset.
**Examples**
```
ledis> BSETBIT flag 0 1
(integer) 0
ledis> BSETBIT flag 0 0
(integer) 1
ledis> BGETBIT flag 0 99
ERR invalid command param
```
### BMSETBIT key offset value [offset value ...]
Sets the given *offsets* to their respective values.
**Return value**
int64 : The number of input *offsets*
**Examples**
```
ledis> BMSETBIT flag 0 1 1 1 2 0 3 1
(integer) 4
ledis> BCOUNT flag
(integer) 3
```
### BOPT operation destkey key [key ...]
Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key.
**Return value**
Int64:
The size of the string stored in the destination key, that is equal to the size of the longest input string.
**Examples**
```
ledis> BMSETBIT a 0 1 2 1
(integer) 2
ledis> BMSETBIT b 1 1
(integer) 1
ledis> BOPT AND res a b
(integer) 3
ledis> BCOUNT res
(integer) 0
ledis> BOPT OR res2 a b
(integer) 3
ledis> BCOUNT res2
(integer) 3
ledis> BOPT XOR res3 a b
(integer) 3
ledis> BCOUNT res3
(integer) 3
```
### BCOUNT key [start end]
Count the number of set bits in a bitmap.
**Return value**
int64 : The number of bits set to 1.
**Examples**
```
ledis> BMSETBIT flag 0 1 5 1 6 1
(integer) 3
ledis> BGET flag
a
ledis> BCOUNT flag
(integer) 3
ledis> BCOUNT flag 0 0
(integer) 1
ledis> BCOUNT flag 0 4
(integer) 1
ledis> BCOUNT flag 0 5
(integer) 2
ledis> BCOUNT flag 5 6
(integer) 2
```
### BEXPIRE key seconds
(refer to [EXPIRE](#expire-key-seconds) api for other types)
### BEXPIREAT key timestamp
(refer to [EXPIREAT](#expireat-key-timestamp) api for other types)
### BTTL key
(refer to [TTL](#ttl-key) api for other types)
### BPERSIST key
(refer to [PERSIST](#persist-key) api for other types)
### BXSCAN key [MATCH match] [COUNT count]
Iterate Bitmap keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### BXREVSCAN key [MATCH match] [COUNT count]
Reverse iterate Bitmap keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### XBSCAN key [MATCH match] [COUNT count]
Iterate Bitmap keys incrementally.
See [XSCAN](#xscan-key-match-match-count-count) for more information.
### XBREVSCAN key [MATCH match] [COUNT count]
Reverse iterate Bitmap keys incrementally.
See [XREVSCAN](#xrevscan-key-match-match-count-count) for more information.
### ZKEYEXISTS key
Check whether the key exists for zset data, like [EXISTS key](#exists-key)
## Scan
### XSCAN type cursor [MATCH match] [COUNT count]
Iterate keys of the given data type incrementally.
Type is "KV", "LIST", "HASH", "SET" or "ZSET".
Cursor is the start for the current iteration.
Match is the regexp for checking matched keys.
Count is the maximum number of retrieved elements; default is 10.
**Return value**
an array of two values, first value is the cursor for the next iteration, second value is an array of elements.
**Examples**
```
ledis>set a 1
OK
ledis>set b 2
OK
ledis>set c 3
OK
127.0.0.1:6380>xscan "KV" ""
1) ""
2) ["a" "b" "c"]
ledis>xscan "KV" "" count 1
1) "a"
2) ["a"]
ledis>xscan "KV" "a" count 1
1) "b"
2) ["b"]
ledis>xscan "KV" "b" count 1
1) "c"
2) ["c"]
ledis>xscan "KV" "c" count 1
1) ""
2) []
```
### XHSCAN key cursor [MATCH match] [COUNT count]
Same as [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count), but the returned array of elements contains pairs of a field and its value.
### XSSCAN key cursor [MATCH match] [COUNT count]
Same as [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count)
### XZSCAN key cursor [MATCH match] [COUNT count]
Same as [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count), but the returned array of elements contains pairs of a member and its associated score.
## Replication

1
doc/doctoc_commands.sh Executable file
View File

@ -0,0 +1 @@
doctoc --title '**Commands List**' commands.md

View File

@ -9,6 +9,11 @@ http_addr = "127.0.0.1:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"
# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# Maximum databases is 256.
databases = 16
# Log server command, set empty to disable
access_log = ""

View File

@ -6,20 +6,57 @@ import (
const Version = "0.4"
type DataType byte
// for external use
const (
NoneType byte = 0
KVType byte = 1
HashType byte = 2
HSizeType byte = 3
ListType byte = 4
LMetaType byte = 5
ZSetType byte = 6
ZSizeType byte = 7
ZScoreType byte = 8
BitType byte = 9
BitMetaType byte = 10
SetType byte = 11
SSizeType byte = 12
KV DataType = iota
LIST
HASH
SET
ZSET
)
func (d DataType) String() string {
switch d {
case KV:
return KVName
case LIST:
return ListName
case HASH:
return HashName
case SET:
return SetName
case ZSET:
return ZSetName
default:
return "unknown"
}
}
const (
KVName = "KV"
ListName = "LIST"
HashName = "HASH"
SetName = "SET"
ZSetName = "ZSET"
)
// for backend store
const (
NoneType byte = 0
KVType byte = 1
HashType byte = 2
HSizeType byte = 3
ListType byte = 4
LMetaType byte = 5
ZSetType byte = 6
ZSizeType byte = 7
ZScoreType byte = 8
// BitType byte = 9
// BitMetaType byte = 10
SetType byte = 11
SSizeType byte = 12
maxDataType byte = 100
@ -36,16 +73,16 @@ const (
var (
TypeName = map[byte]string{
KVType: "kv",
HashType: "hash",
HSizeType: "hsize",
ListType: "list",
LMetaType: "lmeta",
ZSetType: "zset",
ZSizeType: "zsize",
ZScoreType: "zscore",
BitType: "bit",
BitMetaType: "bitmeta",
KVType: "kv",
HashType: "hash",
HSizeType: "hsize",
ListType: "list",
LMetaType: "lmeta",
ZSetType: "zset",
ZSizeType: "zsize",
ZScoreType: "zscore",
// BitType: "bit",
// BitMetaType: "bitmeta",
SetType: "set",
SSizeType: "ssize",
ExpTimeType: "exptime",
@ -67,9 +104,6 @@ var (
)
const (
//we don't support too many databases
MaxDBNumber uint8 = 16
//max key size
MaxKeySize int = 1024

View File

@ -81,20 +81,20 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) {
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, score, 10)
}
case BitType:
if key, seq, err := db.bDecodeBinKey(k); err != nil {
return nil, err
} else {
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendUint(buf, uint64(seq), 10)
}
case BitMetaType:
if key, err := db.bDecodeMetaKey(k); err != nil {
return nil, err
} else {
buf = strconv.AppendQuote(buf, hack.String(key))
}
// case BitType:
// if key, seq, err := db.bDecodeBinKey(k); err != nil {
// return nil, err
// } else {
// buf = strconv.AppendQuote(buf, hack.String(key))
// buf = append(buf, ' ')
// buf = strconv.AppendUint(buf, uint64(seq), 10)
// }
// case BitMetaType:
// if key, err := db.bDecodeMetaKey(k); err != nil {
// return nil, err
// } else {
// buf = strconv.AppendQuote(buf, hack.String(key))
// }
case SetType:
if key, member, err := db.sDecodeSetKey(k); err != nil {
return nil, err

View File

@ -1,26 +0,0 @@
package ledis
import ()
// todo, add info
// type Keyspace struct {
// Kvs int `json:"kvs"`
// KvExpires int `json:"kv_expires"`
// Lists int `json:"lists"`
// ListExpires int `json:"list_expires"`
// Bitmaps int `json:"bitmaps"`
// BitmapExpires int `json:"bitmap_expires"`
// ZSets int `json:"zsets"`
// ZSetExpires int `json:"zset_expires"`
// Hashes int `json:"hashes"`
// HashExpires int `json:"hahsh_expires"`
// }
// type Info struct {
// KeySpaces [MaxDBNumber]Keyspace
// }

View File

@ -18,7 +18,7 @@ type Ledis struct {
cfg *config.Config
ldb *store.DB
dbs [MaxDBNumber]*DB
dbs []*DB
quit chan struct{}
wg sync.WaitGroup
@ -35,7 +35,7 @@ type Ledis struct {
lock io.Closer
tcs [MaxDBNumber]*ttlChecker
tcs []*ttlChecker
}
func Open(cfg *config.Config) (*Ledis, error) {
@ -43,6 +43,10 @@ func Open(cfg *config.Config) (*Ledis, error) {
cfg.DataDir = config.DefaultDataDir
}
if cfg.Databases == 0 {
cfg.Databases = 16
}
os.MkdirAll(cfg.DataDir, 0755)
var err error
@ -78,7 +82,8 @@ func Open(cfg *config.Config) (*Ledis, error) {
l.r = nil
}
for i := uint8(0); i < MaxDBNumber; i++ {
l.dbs = make([]*DB, cfg.Databases)
for i := uint8(0); i < cfg.Databases; i++ {
l.dbs[i] = l.newDB(i)
}
@ -105,7 +110,7 @@ func (l *Ledis) Close() {
}
func (l *Ledis) Select(index int) (*DB, error) {
if index < 0 || index >= int(MaxDBNumber) {
if index < 0 || index >= len(l.dbs) {
return nil, fmt.Errorf("invalid db index %d", index)
}
@ -167,6 +172,7 @@ func (l *Ledis) IsReadOnly() bool {
}
func (l *Ledis) checkTTL() {
l.tcs = make([]*ttlChecker, len(l.dbs))
for i, db := range l.dbs {
c := newTTLChecker(db)
@ -174,7 +180,7 @@ func (l *Ledis) checkTTL() {
c.register(ListType, db.listBatch, db.lDelete)
c.register(HashType, db.hashBatch, db.hDelete)
c.register(ZSetType, db.zsetBatch, db.zDelete)
c.register(BitType, db.binBatch, db.bDelete)
// c.register(BitType, db.binBatch, db.bDelete)
c.register(SetType, db.setBatch, db.sDelete)
l.tcs[i] = c
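For reference, a minimal embedding sketch using the new `Databases` config field; the data directory and database count below are hypothetical:

```go
package main

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/ledis"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/ledis_example" // hypothetical path
	cfg.Databases = 64                 // 0 falls back to the default of 16, as in Open above

	l, err := ledis.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Select succeeds for any index in [0, cfg.Databases).
	if _, err := l.Select(63); err != nil {
		panic(err)
	}
}
```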

View File

@ -36,8 +36,8 @@ type DB struct {
listBatch *batch
hashBatch *batch
zsetBatch *batch
binBatch *batch
setBatch *batch
// binBatch *batch
setBatch *batch
status uint8
@ -60,7 +60,7 @@ func (l *Ledis) newDB(index uint8) *DB {
d.listBatch = d.newBatch()
d.hashBatch = d.newBatch()
d.zsetBatch = d.newBatch()
d.binBatch = d.newBatch()
// d.binBatch = d.newBatch()
d.setBatch = d.newBatch()
d.lbkeys = newLBlockKeys()
@ -86,7 +86,6 @@ func (db *DB) FlushAll() (drop int64, err error) {
db.lFlush,
db.hFlush,
db.zFlush,
db.bFlush,
db.sFlush}
for _, flush := range all {
@ -117,9 +116,9 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
case ZSetType:
deleteFunc = db.zDelete
metaDataType = ZSizeType
case BitType:
deleteFunc = db.bDelete
metaDataType = BitMetaType
// case BitType:
// deleteFunc = db.bDelete
// metaDataType = BitMetaType
case SetType:
deleteFunc = db.sDelete
metaDataType = SSizeType
@ -128,7 +127,7 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
}
var keys [][]byte
keys, err = db.scan(metaDataType, nil, 1024, false, "")
keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
for len(keys) != 0 || err != nil {
for _, key := range keys {
deleteFunc(t, key)
@ -141,7 +140,7 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
} else {
drop += int64(len(keys))
}
keys, err = db.scan(metaDataType, nil, 1024, false, "")
keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
}
return
}

View File

@ -44,7 +44,7 @@ func (db *DB) Multi() (*Multi, error) {
m.DB.listBatch = m.newBatch()
m.DB.hashBatch = m.newBatch()
m.DB.zsetBatch = m.newBatch()
m.DB.binBatch = m.newBatch()
// m.DB.binBatch = m.newBatch()
m.DB.setBatch = m.newBatch()
m.DB.lbkeys = db.lbkeys
@ -66,7 +66,7 @@ func (m *Multi) Close() error {
}
func (m *Multi) Select(index int) error {
if index < 0 || index >= int(MaxDBNumber) {
if index < 0 || index >= int(m.l.cfg.Databases) {
return fmt.Errorf("invalid db index %d", index)
}

View File

@ -9,19 +9,48 @@ import (
var errDataType = errors.New("error data type")
var errMetaKey = errors.New("error meta key")
func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scanGeneric(dataType, key, count, inclusive, match, false)
// if inclusive is true, the scan range is [cursor, inf), else (cursor, inf)
func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
storeDataType, err := getDataStoreType(dataType)
if err != nil {
return nil, err
}
return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false)
}
func (db *DB) revscan(dataType byte, key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scanGeneric(dataType, key, count, inclusive, match, true)
// if inclusive is true, the revscan range is (-inf, cursor], else (-inf, cursor)
func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
storeDataType, err := getDataStoreType(dataType)
if err != nil {
return nil, err
}
return db.scanGeneric(storeDataType, cursor, count, inclusive, match, true)
}
func (db *DB) scanGeneric(dataType byte, key []byte, count int,
inclusive bool, match string, reverse bool) ([][]byte, error) {
var minKey, maxKey []byte
func getDataStoreType(dataType DataType) (byte, error) {
var storeDataType byte
switch dataType {
case KV:
storeDataType = KVType
case LIST:
storeDataType = LMetaType
case HASH:
storeDataType = HSizeType
case SET:
storeDataType = SSizeType
case ZSET:
storeDataType = ZSizeType
default:
return 0, errDataType
}
return storeDataType, nil
}
func buildMatchRegexp(match string) (*regexp.Regexp, error) {
var err error
var r *regexp.Regexp
var r *regexp.Regexp = nil
if len(match) > 0 {
if r, err = regexp.Compile(match); err != nil {
@ -29,13 +58,24 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int,
}
}
return r, nil
}
func (db *DB) scanGeneric(storeDataType byte, key []byte, count int,
inclusive bool, match string, reverse bool) ([][]byte, error) {
var minKey, maxKey []byte
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
tp := store.RangeOpen
if !reverse {
if minKey, err = db.encodeScanMinKey(dataType, key); err != nil {
if minKey, err = db.encodeScanMinKey(storeDataType, key); err != nil {
return nil, err
}
if maxKey, err = db.encodeScanMaxKey(dataType, nil); err != nil {
if maxKey, err = db.encodeScanMaxKey(storeDataType, nil); err != nil {
return nil, err
}
@ -43,10 +83,10 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int,
tp = store.RangeROpen
}
} else {
if minKey, err = db.encodeScanMinKey(dataType, nil); err != nil {
if minKey, err = db.encodeScanMinKey(storeDataType, nil); err != nil {
return nil, err
}
if maxKey, err = db.encodeScanMaxKey(dataType, key); err != nil {
if maxKey, err = db.encodeScanMaxKey(storeDataType, key); err != nil {
return nil, err
}
@ -69,7 +109,7 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int,
v := make([][]byte, 0, count)
for i := 0; it.Valid() && i < count; it.Next() {
if k, err := db.decodeScanKey(dataType, it.Key()); err != nil {
if k, err := db.decodeScanKey(storeDataType, it.Key()); err != nil {
continue
} else if r != nil && !r.Match(k) {
continue
@ -82,36 +122,36 @@ func (db *DB) scanGeneric(dataType byte, key []byte, count int,
return v, nil
}
func (db *DB) encodeScanMinKey(dataType byte, key []byte) ([]byte, error) {
func (db *DB) encodeScanMinKey(storeDataType byte, key []byte) ([]byte, error) {
if len(key) == 0 {
return db.encodeScanKey(dataType, nil)
return db.encodeScanKey(storeDataType, nil)
} else {
if err := checkKeySize(key); err != nil {
return nil, err
}
return db.encodeScanKey(dataType, key)
return db.encodeScanKey(storeDataType, key)
}
}
func (db *DB) encodeScanMaxKey(dataType byte, key []byte) ([]byte, error) {
func (db *DB) encodeScanMaxKey(storeDataType byte, key []byte) ([]byte, error) {
if len(key) > 0 {
if err := checkKeySize(key); err != nil {
return nil, err
}
return db.encodeScanKey(dataType, key)
return db.encodeScanKey(storeDataType, key)
}
k, err := db.encodeScanKey(dataType, nil)
k, err := db.encodeScanKey(storeDataType, nil)
if err != nil {
return nil, err
}
k[len(k)-1] = dataType + 1
k[len(k)-1] = storeDataType + 1
return k, nil
}
func (db *DB) encodeScanKey(dataType byte, key []byte) ([]byte, error) {
switch dataType {
func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) {
switch storeDataType {
case KVType:
return db.encodeKVKey(key), nil
case LMetaType:
@ -120,17 +160,137 @@ func (db *DB) encodeScanKey(dataType byte, key []byte) ([]byte, error) {
return db.hEncodeSizeKey(key), nil
case ZSizeType:
return db.zEncodeSizeKey(key), nil
case BitMetaType:
return db.bEncodeMetaKey(key), nil
case SSizeType:
return db.sEncodeSizeKey(key), nil
// case BitMetaType:
// return db.bEncodeMetaKey(key), nil
default:
return nil, errDataType
}
}
func (db *DB) decodeScanKey(dataType byte, ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != dataType {
func (db *DB) decodeScanKey(storeDataType byte, ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != storeDataType {
return nil, errMetaKey
}
return ek[2:], nil
}
// for special data scan
func (db *DB) buildDataScanIterator(start []byte, stop []byte, inclusive bool) *store.RangeLimitIterator {
tp := store.RangeROpen
if !inclusive {
tp = store.RangeOpen
}
it := db.bucket.RangeIterator(start, stop, tp)
return it
}
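// As I read store's range types here: RangeROpen gives [start, stop), so the
// cursor element itself is returned when inclusive is true, while RangeOpen
// gives (start, stop) and skips it.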
func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.hEncodeHashKey(key, cursor)
stop := db.hEncodeStopKey(key)
v := make([]FVPair, 0, 16)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
it := db.buildDataScanIterator(start, stop, inclusive)
defer it.Close()
for i := 0; it.Valid() && i < count; it.Next() {
_, f, err := db.hDecodeHashKey(it.Key())
if err != nil {
return nil, err
} else if r != nil && !r.Match(f) {
continue
}
v = append(v, FVPair{Field: f, Value: it.Value()})
i++
}
return v, nil
}
func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.sEncodeSetKey(key, cursor)
stop := db.sEncodeStopKey(key)
v := make([][]byte, 0, 16)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
it := db.buildDataScanIterator(start, stop, inclusive)
defer it.Close()
for i := 0; it.Valid() && i < count; it.Next() {
_, m, err := db.sDecodeSetKey(it.Key())
if err != nil {
return nil, err
} else if r != nil && !r.Match(m) {
continue
}
v = append(v, m)
i++
}
return v, nil
}
func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.zEncodeSetKey(key, cursor)
stop := db.zEncodeStopSetKey(key)
v := make([]ScorePair, 0, 16)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
it := db.buildDataScanIterator(start, stop, inclusive)
defer it.Close()
for i := 0; it.Valid() && i < count; it.Next() {
_, m, err := db.zDecodeSetKey(it.Key())
if err != nil {
return nil, err
} else if r != nil && !r.Match(m) {
continue
}
score, err := Int64(it.Value(), nil)
if err != nil {
return nil, err
}
v = append(v, ScorePair{Score: score, Member: m})
i++
}
return v, nil
}
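For illustration, a hedged sketch of how the new element-level scans compose (key and member names are invented; db is again assumed to be an open *DB):

func exampleElementScan(db *DB) error {
	// hash: first page of fields and values
	if _, err := db.HScan([]byte("myhash"), nil, 10, true, ""); err != nil {
		return err
	}
	// set: members matching the regexp "a.*", continuing strictly after "a1"
	if _, err := db.SScan([]byte("myset"), []byte("a1"), 10, false, "a.*"); err != nil {
		return err
	}
	// zset: members together with their scores
	_, err := db.ZScan([]byte("myzset"), nil, 10, true, "")
	return err
}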

View File

@ -21,13 +21,13 @@ func TestDBScan(t *testing.T) {
db.FlushAll()
if v, err := db.Scan(nil, 10, true, ""); err != nil {
if v, err := db.Scan(KV, nil, 10, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 0 {
t.Fatal(len(v))
}
if v, err := db.RevScan(nil, 10, true, ""); err != nil {
if v, err := db.RevScan(KV, nil, 10, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 0 {
t.Fatal(len(v))
@ -37,73 +37,73 @@ func TestDBScan(t *testing.T) {
db.Set([]byte("b"), []byte{})
db.Set([]byte("c"), []byte{})
if v, err := db.Scan(nil, 1, true, ""); err != nil {
if v, err := db.Scan(KV, nil, 1, true, ""); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "a")
}
if v, err := db.Scan([]byte("a"), 2, false, ""); err != nil {
if v, err := db.Scan(KV, []byte("a"), 2, false, ""); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "b", "c")
}
if v, err := db.Scan(nil, 3, true, ""); err != nil {
if v, err := db.Scan(KV, nil, 3, true, ""); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "a", "b", "c")
}
if v, err := db.Scan(nil, 3, true, "b"); err != nil {
if v, err := db.Scan(KV, nil, 3, true, "b"); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "b")
}
if v, err := db.Scan(nil, 3, true, "."); err != nil {
if v, err := db.Scan(KV, nil, 3, true, "."); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "a", "b", "c")
}
if v, err := db.Scan(nil, 3, true, "a+"); err != nil {
if v, err := db.Scan(KV, nil, 3, true, "a+"); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "a")
}
if v, err := db.RevScan(nil, 1, true, ""); err != nil {
if v, err := db.RevScan(KV, nil, 1, true, ""); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "c")
}
if v, err := db.RevScan([]byte("c"), 2, false, ""); err != nil {
if v, err := db.RevScan(KV, []byte("c"), 2, false, ""); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "b", "a")
}
if v, err := db.RevScan(nil, 3, true, ""); err != nil {
if v, err := db.RevScan(KV, nil, 3, true, ""); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "c", "b", "a")
}
if v, err := db.RevScan(nil, 3, true, "b"); err != nil {
if v, err := db.RevScan(KV, nil, 3, true, "b"); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "b")
}
if v, err := db.RevScan(nil, 3, true, "."); err != nil {
if v, err := db.RevScan(KV, nil, 3, true, "."); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "c", "b", "a")
}
if v, err := db.RevScan(nil, 3, true, "c+"); err != nil {
if v, err := db.RevScan(KV, nil, 3, true, "c+"); err != nil {
t.Fatal(err)
} else {
checkTestScan(t, v, "c")
@ -111,7 +111,7 @@ func TestDBScan(t *testing.T) {
}
func TestDBHScan(t *testing.T) {
func TestDBHKeyScan(t *testing.T) {
db := getTestDB()
db.hFlush()
@ -125,7 +125,7 @@ func TestDBHScan(t *testing.T) {
k3 := []byte("k3")
db.HSet(k3, []byte("3"), []byte{})
if v, err := db.HScan(nil, 1, true, ""); err != nil {
if v, err := db.Scan(HASH, nil, 1, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid length ", len(v))
@ -133,7 +133,7 @@ func TestDBHScan(t *testing.T) {
t.Fatal("invalid value ", string(v[0]))
}
if v, err := db.HScan(k1, 2, true, ""); err != nil {
if v, err := db.Scan(HASH, k1, 2, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -143,7 +143,7 @@ func TestDBHScan(t *testing.T) {
t.Fatal("invalid value ", string(v[1]))
}
if v, err := db.HScan(k1, 2, false, ""); err != nil {
if v, err := db.Scan(HASH, k1, 2, false, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -155,7 +155,7 @@ func TestDBHScan(t *testing.T) {
}
func TestDBZScan(t *testing.T) {
func TestDBZKeyScan(t *testing.T) {
db := getTestDB()
db.zFlush()
@ -169,7 +169,7 @@ func TestDBZScan(t *testing.T) {
k3 := []byte("k3")
db.ZAdd(k3, ScorePair{3, []byte("m")})
if v, err := db.ZScan(nil, 1, true, ""); err != nil {
if v, err := db.Scan(ZSET, nil, 1, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid length ", len(v))
@ -177,7 +177,7 @@ func TestDBZScan(t *testing.T) {
t.Fatal("invalid value ", string(v[0]))
}
if v, err := db.ZScan(k1, 2, true, ""); err != nil {
if v, err := db.Scan(ZSET, k1, 2, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -187,7 +187,7 @@ func TestDBZScan(t *testing.T) {
t.Fatal("invalid value ", string(v[1]))
}
if v, err := db.ZScan(k1, 2, false, ""); err != nil {
if v, err := db.Scan(ZSET, k1, 2, false, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -199,7 +199,7 @@ func TestDBZScan(t *testing.T) {
}
func TestDBLScan(t *testing.T) {
func TestDBLKeyScan(t *testing.T) {
db := getTestDB()
db.lFlush()
@ -219,7 +219,7 @@ func TestDBLScan(t *testing.T) {
t.Fatal(err.Error())
}
if v, err := db.LScan(nil, 1, true, ""); err != nil {
if v, err := db.Scan(LIST, nil, 1, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid length ", len(v))
@ -227,7 +227,7 @@ func TestDBLScan(t *testing.T) {
t.Fatal("invalid value ", string(v[0]))
}
if v, err := db.LScan(k1, 2, true, ""); err != nil {
if v, err := db.Scan(LIST, k1, 2, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -237,7 +237,7 @@ func TestDBLScan(t *testing.T) {
t.Fatal("invalid value ", string(v[1]))
}
if v, err := db.LScan(k1, 2, false, ""); err != nil {
if v, err := db.Scan(LIST, k1, 2, false, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -249,60 +249,10 @@ func TestDBLScan(t *testing.T) {
}
func TestDBBScan(t *testing.T) {
// db := getTestDB()
// db.bFlush()
// k1 := []byte("k1")
// if _, err := db.BSetBit(k1, 1, 1); err != nil {
// t.Fatal(err.Error())
// }
// k2 := []byte("k2")
// if _, err := db.BSetBit(k2, 1, 1); err != nil {
// t.Fatal(err.Error())
// }
// k3 := []byte("k3")
// if _, err := db.BSetBit(k3, 1, 0); err != nil {
// t.Fatal(err.Error())
// }
// if v, err := db.BScan(nil, 1, true, ""); err != nil {
// t.Fatal(err)
// } else if len(v) != 1 {
// t.Fatal("invalid length ", len(v))
// } else if string(v[0]) != "k1" {
// t.Fatal("invalid value ", string(v[0]))
// }
// if v, err := db.BScan(k1, 2, true, ""); err != nil {
// t.Fatal(err)
// } else if len(v) != 2 {
// t.Fatal("invalid length ", len(v))
// } else if string(v[0]) != "k1" {
// t.Fatal("invalid value ", string(v[0]))
// } else if string(v[1]) != "k2" {
// t.Fatal("invalid value ", string(v[1]))
// }
// if v, err := db.BScan(k1, 2, false, ""); err != nil {
// t.Fatal(err)
// } else if len(v) != 2 {
// t.Fatal("invalid length ", len(v))
// } else if string(v[0]) != "k2" {
// t.Fatal("invalid value ", string(v[0]))
// } else if string(v[1]) != "k3" {
// t.Fatal("invalid value ", string(v[1]))
// }
}
func TestDBSScan(t *testing.T) {
func TestDBSKeyScan(t *testing.T) {
db := getTestDB()
db.bFlush()
db.sFlush()
k1 := []byte("k1")
if _, err := db.SAdd(k1, []byte("1")); err != nil {
@ -319,7 +269,7 @@ func TestDBSScan(t *testing.T) {
t.Fatal(err.Error())
}
if v, err := db.SScan(nil, 1, true, ""); err != nil {
if v, err := db.Scan(SET, nil, 1, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid length ", len(v))
@ -327,7 +277,7 @@ func TestDBSScan(t *testing.T) {
t.Fatal("invalid value ", string(v[0]))
}
if v, err := db.SScan(k1, 2, true, ""); err != nil {
if v, err := db.Scan(SET, k1, 2, true, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -337,7 +287,7 @@ func TestDBSScan(t *testing.T) {
t.Fatal("invalid value ", string(v[1]))
}
if v, err := db.SScan(k1, 2, false, ""); err != nil {
if v, err := db.Scan(SET, k1, 2, false, ""); err != nil {
t.Fatal(err)
} else if len(v) != 2 {
t.Fatal("invalid length ", len(v))
@ -346,5 +296,77 @@ func TestDBSScan(t *testing.T) {
} else if string(v[1]) != "k3" {
t.Fatal("invalid value ", string(v[1]))
}
}
func TestDBHScan(t *testing.T) {
db := getTestDB()
key := []byte("scan_h_key")
value := []byte("hello world")
db.HSet(key, []byte("1"), value)
db.HSet(key, []byte("222"), value)
db.HSet(key, []byte("19"), value)
db.HSet(key, []byte("1234"), value)
v, err := db.HScan(key, nil, 100, true, "")
if err != nil {
t.Fatal(err)
} else if len(v) != 4 {
t.Fatal("invalid count", len(v))
}
v, err = db.HScan(key, []byte("19"), 1, false, "")
if err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid count", len(v))
} else if string(v[0].Field) != "222" {
t.Fatal(string(v[0].Field))
}
}
func TestDBSScan(t *testing.T) {
db := getTestDB()
key := []byte("scan_s_key")
db.SAdd(key, []byte("1"), []byte("222"), []byte("19"), []byte("1234"))
v, err := db.SScan(key, nil, 100, true, "")
if err != nil {
t.Fatal(err)
} else if len(v) != 4 {
t.Fatal("invalid count", len(v))
}
v, err = db.SScan(key, []byte("19"), 1, false, "")
if err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid count", len(v))
} else if string(v[0]) != "222" {
t.Fatal(string(v[0]))
}
}
func TestDBZScan(t *testing.T) {
db := getTestDB()
key := []byte("scan_z_key")
db.ZAdd(key, ScorePair{1, []byte("1")}, ScorePair{2, []byte("222")}, ScorePair{3, []byte("19")}, ScorePair{4, []byte("1234")})
v, err := db.ZScan(key, nil, 100, true, "")
if err != nil {
t.Fatal(err)
} else if len(v) != 4 {
t.Fatal("invalid count", len(v))
}
v, err = db.ZScan(key, []byte("19"), 1, false, "")
if err != nil {
t.Fatal(err)
} else if len(v) != 1 {
t.Fatal("invalid count", len(v))
} else if string(v[0].Member) != "222" {
t.Fatal(string(v[0].Member))
}
}

File diff suppressed because it is too large

View File

@ -354,6 +354,8 @@ func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
v := make([]FVPair, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, f, err := db.hDecodeHashKey(it.Key())
if err != nil {
@ -363,8 +365,6 @@ func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
v = append(v, FVPair{Field: f, Value: it.Value()})
}
it.Close()
return v, nil
}
@ -379,6 +379,8 @@ func (db *DB) HKeys(key []byte) ([][]byte, error) {
v := make([][]byte, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, f, err := db.hDecodeHashKey(it.Key())
if err != nil {
@ -387,8 +389,6 @@ func (db *DB) HKeys(key []byte) ([][]byte, error) {
v = append(v, f)
}
it.Close()
return v, nil
}
@ -403,6 +403,8 @@ func (db *DB) HValues(key []byte) ([][]byte, error) {
v := make([][]byte, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, _, err := db.hDecodeHashKey(it.Key())
if err != nil {
@ -412,8 +414,6 @@ func (db *DB) HValues(key []byte) ([][]byte, error) {
v = append(v, it.Value())
}
it.Close()
return v, nil
}
@ -460,14 +460,6 @@ func (db *DB) hFlush() (drop int64, err error) {
return db.flushType(t, HashType)
}
func (db *DB) HScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scan(HSizeType, key, count, inclusive, match)
}
func (db *DB) HRevScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.revscan(HSizeType, key, count, inclusive, match)
}
func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 {
return 0, errExpireValue
@ -510,7 +502,7 @@ func (db *DB) HPersist(key []byte) (int64, error) {
return n, err
}
func (db *DB) XHExists(key []byte) (int64, error) {
func (db *DB) HKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}

View File

@ -79,10 +79,10 @@ func TestHashPersist(t *testing.T) {
t.Fatal(n)
}
}
func TestXHashExists(t *testing.T) {
func TestHashKeyExists(t *testing.T) {
db := getTestDB()
key := []byte("xhexists_test")
v, err := db.XHExists(key)
key := []byte("hkeyexists_test")
v, err := db.HKeyExists(key)
if err != nil {
t.Fatal(err.Error())
}
@ -94,7 +94,7 @@ func TestXHashExists(t *testing.T) {
t.Fatal(err.Error())
}
v, err = db.XHExists(key)
v, err = db.HKeyExists(key)
if err != nil {
t.Fatal(err.Error())
}
@ -114,7 +114,7 @@ func TestHFlush(t *testing.T) {
}
}
if v, err := db.HScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(HASH, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 2000 {
t.Fatal("invalid value ", len(v))
@ -135,7 +135,7 @@ func TestHFlush(t *testing.T) {
t.Fatal("invalid value ", n)
}
if v, err := db.HScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(HASH, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 0 {
t.Fatal("invalid value length ", len(v))

View File

@ -347,16 +347,6 @@ func (db *DB) flush() (drop int64, err error) {
return db.flushType(t, KVType)
}
//if inclusive is true, scan range [key, inf) else (key, inf)
func (db *DB) Scan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scan(KVType, key, count, inclusive, match)
}
//if inclusive is true, revscan range (-inf, key] else (inf, key)
func (db *DB) RevScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.revscan(KVType, key, count, inclusive, match)
}
func (db *DB) Expire(key []byte, duration int64) (int64, error) {
if duration <= 0 {
return 0, errExpireValue

View File

@ -282,7 +282,7 @@ func TestKVFlush(t *testing.T) {
}
}
if v, err := db.Scan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(KV, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 2000 {
t.Fatal("invalid value ", len(v))
@ -303,7 +303,7 @@ func TestKVFlush(t *testing.T) {
t.Fatal("invalid value ", n)
}
if v, err := db.Scan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(KV, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 0 {
t.Fatal("invalid value length ", len(v))

View File

@ -480,14 +480,6 @@ func (db *DB) LPersist(key []byte) (int64, error) {
return n, err
}
func (db *DB) LScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scan(LMetaType, key, count, inclusive, match)
}
func (db *DB) LRevScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.revscan(LMetaType, key, count, inclusive, match)
}
func (db *DB) lEncodeMinKey() []byte {
return db.lEncodeMetaKey(nil)
}
@ -506,7 +498,7 @@ func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error)
return db.lblockPop(keys, listTailSeq, timeout)
}
func (db *DB) XLExists(key []byte) (int64, error) {
func (db *DB) LKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}

View File

@ -145,7 +145,7 @@ func TestLFlush(t *testing.T) {
}
}
if v, err := db.LScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(LIST, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 2000 {
t.Fatal("invalid value ", len(v))
@ -157,23 +157,23 @@ func TestLFlush(t *testing.T) {
t.Fatal("invalid value ", n)
}
if v, err := db.LScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(LIST, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 0 {
t.Fatal("invalid value length ", len(v))
}
}
func TestXLExists(t *testing.T) {
func TestLKeyExists(t *testing.T) {
db := getTestDB()
key := []byte("xlexists_test")
if n, err := db.XLExists(key); err != nil {
key := []byte("lkeyexists_test")
if n, err := db.LKeyExists(key); err != nil {
t.Fatal(err.Error())
} else if n != 0 {
t.Fatal("invalid value ", n)
}
db.LPush(key, []byte("hello"), []byte("world"))
if n, err := db.XLExists(key); err != nil {
if n, err := db.LKeyExists(key); err != nil {
t.Fatal(err.Error())
} else if n != 1 {
t.Fatal("invalid value ", n)

View File

@ -286,7 +286,7 @@ func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) {
return n, err
}
func (db *DB) XSExists(key []byte) (int64, error) {
func (db *DB) SKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
@ -607,11 +607,3 @@ func (db *DB) SPersist(key []byte) (int64, error) {
err = t.Commit()
return n, err
}
func (db *DB) SScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scan(SSizeType, key, count, inclusive, match)
}
func (db *DB) SRevScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.revscan(SSizeType, key, count, inclusive, match)
}

View File

@ -352,7 +352,7 @@ func TestSFlush(t *testing.T) {
}
}
if v, err := db.SScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(SET, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 2000 {
t.Fatal("invalid value ", len(v))
@ -364,7 +364,7 @@ func TestSFlush(t *testing.T) {
t.Fatal("invalid value ", n)
}
if v, err := db.SScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(SET, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 0 {
t.Fatal("invalid value length ", len(v))
@ -372,10 +372,10 @@ func TestSFlush(t *testing.T) {
}
func TestXSExists(t *testing.T) {
func TestSKeyExists(t *testing.T) {
db := getTestDB()
key := []byte("xsexists_test")
if n, err := db.XSExists(key); err != nil {
key := []byte("skeyexists_test")
if n, err := db.SKeyExists(key); err != nil {
t.Fatal(err.Error())
} else if n != 0 {
t.Fatal("invalid value ", n)
@ -383,7 +383,7 @@ func TestXSExists(t *testing.T) {
db.SAdd(key, []byte("hello"), []byte("world"))
if n, err := db.XSExists(key); err != nil {
if n, err := db.SKeyExists(key); err != nil {
t.Fatal(err.Error())
} else if n != 1 {
t.Fatal("invalid value ", n)

View File

@ -936,14 +936,6 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg
return n, nil
}
func (db *DB) ZScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.scan(ZSizeType, key, count, inclusive, match)
}
func (db *DB) ZRevScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.revscan(ZSizeType, key, count, inclusive, match)
}
func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) {
if min == nil {
min = db.zEncodeStartSetKey(key)
@ -1024,7 +1016,7 @@ func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (in
return n, nil
}
func (db *DB) XZExists(key []byte) (int64, error) {
func (db *DB) ZKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}

View File

@ -391,7 +391,7 @@ func TestZScan(t *testing.T) {
}
}
if v, err := db.ZScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(ZSET, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 2000 {
t.Fatal("invalid value ", len(v))
@ -403,7 +403,7 @@ func TestZScan(t *testing.T) {
t.Fatal("invalid value ", n)
}
if v, err := db.ZScan(nil, 3000, true, ""); err != nil {
if v, err := db.Scan(ZSET, nil, 3000, true, ""); err != nil {
t.Fatal(err.Error())
} else if len(v) != 0 {
t.Fatal("invalid value length ", len(v))
@ -466,10 +466,10 @@ func TestZLex(t *testing.T) {
}
func TestXZExists(t *testing.T) {
func TestZKeyExists(t *testing.T) {
db := getTestDB()
key := []byte("xzexists_test")
if n, err := db.XZExists(key); err != nil {
key := []byte("zkeyexists_test")
if n, err := db.ZKeyExists(key); err != nil {
t.Fatal(err.Error())
} else if n != 0 {
t.Fatal("invalid value ", n)
@ -477,7 +477,7 @@ func TestXZExists(t *testing.T) {
db.ZAdd(key, ScorePair{0, []byte("a")}, ScorePair{0, []byte("b")})
if n, err := db.XZExists(key); err != nil {
if n, err := db.ZKeyExists(key); err != nil {
t.Fatal(err.Error())
} else if n != 1 {
t.Fatal("invalid value ", n)

View File

@ -58,7 +58,6 @@ func (db *DB) Begin() (*Tx, error) {
tx.DB.listBatch = tx.newBatch()
tx.DB.hashBatch = tx.newBatch()
tx.DB.zsetBatch = tx.newBatch()
tx.DB.binBatch = tx.newBatch()
tx.DB.setBatch = tx.newBatch()
tx.DB.lbkeys = db.lbkeys
@ -103,7 +102,7 @@ func (tx *Tx) newBatch() *batch {
}
func (tx *Tx) Select(index int) error {
if index < 0 || index >= int(MaxDBNumber) {
if index < 0 || index >= int(tx.l.cfg.Databases) {
return fmt.Errorf("invalid db index %d", index)
}

View File

@ -1,7 +1,7 @@
package server
import (
goledis "github.com/siddontang/ledisdb/client/go/ledis"
goledis "github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/ledis"
"net"
@ -157,7 +157,9 @@ func (app *App) Close() {
app.closeScript()
app.m.Lock()
app.m.Close()
app.m.Unlock()
app.snap.Close()

View File

@ -1,7 +1,7 @@
package server
import (
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/config"
"os"
"sync"
@ -22,7 +22,8 @@ func newTestLedisClient() {
func getTestConn() *ledis.Conn {
startTestApp()
return testLedisClient.Get()
conn, _ := testLedisClient.Get()
return conn
}
func startTestApp() {

View File

@ -1,301 +1,289 @@
package server
import (
"github.com/siddontang/go/hack"
// import (
// "github.com/siddontang/go/hack"
"github.com/siddontang/ledisdb/ledis"
"strings"
)
// "github.com/siddontang/ledisdb/ledis"
// "strings"
// )
func bgetCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
// func bgetCommand(c *client) error {
// args := c.args
// if len(args) != 1 {
// return ErrCmdParams
// }
if v, err := c.db.BGet(args[0]); err != nil {
return err
} else {
c.resp.writeBulk(v)
}
return nil
}
// if v, err := c.db.BGet(args[0]); err != nil {
// return err
// } else {
// c.resp.writeBulk(v)
// }
// return nil
// }
func bdeleteCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
// func bdeleteCommand(c *client) error {
// args := c.args
// if len(args) != 1 {
// return ErrCmdParams
// }
if n, err := c.db.BDelete(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(n)
}
return nil
}
// if n, err := c.db.BDelete(args[0]); err != nil {
// return err
// } else {
// c.resp.writeInteger(n)
// }
// return nil
// }
func bsetbitCommand(c *client) error {
args := c.args
if len(args) != 3 {
return ErrCmdParams
}
// func bsetbitCommand(c *client) error {
// args := c.args
// if len(args) != 3 {
// return ErrCmdParams
// }
var err error
var offset int32
var val int8
// var err error
// var offset int32
// var val int8
offset, err = ledis.StrInt32(args[1], nil)
// offset, err = ledis.StrInt32(args[1], nil)
if err != nil {
return ErrOffset
}
// if err != nil {
// return ErrOffset
// }
val, err = ledis.StrInt8(args[2], nil)
if val != 0 && val != 1 {
return ErrBool
}
// val, err = ledis.StrInt8(args[2], nil)
// if val != 0 && val != 1 {
// return ErrBool
// }
if err != nil {
return ErrBool
}
// if err != nil {
// return ErrBool
// }
if ori, err := c.db.BSetBit(args[0], offset, uint8(val)); err != nil {
return err
} else {
c.resp.writeInteger(int64(ori))
}
return nil
}
// if ori, err := c.db.BSetBit(args[0], offset, uint8(val)); err != nil {
// return err
// } else {
// c.resp.writeInteger(int64(ori))
// }
// return nil
// }
func bgetbitCommand(c *client) error {
args := c.args
if len(args) != 2 {
return ErrCmdParams
}
// func bgetbitCommand(c *client) error {
// args := c.args
// if len(args) != 2 {
// return ErrCmdParams
// }
offset, err := ledis.StrInt32(args[1], nil)
// offset, err := ledis.StrInt32(args[1], nil)
if err != nil {
return ErrOffset
}
// if err != nil {
// return ErrOffset
// }
if v, err := c.db.BGetBit(args[0], offset); err != nil {
return err
} else {
c.resp.writeInteger(int64(v))
}
return nil
}
// if v, err := c.db.BGetBit(args[0], offset); err != nil {
// return err
// } else {
// c.resp.writeInteger(int64(v))
// }
// return nil
// }
func bmsetbitCommand(c *client) error {
args := c.args
if len(args) < 3 {
return ErrCmdParams
}
// func bmsetbitCommand(c *client) error {
// args := c.args
// if len(args) < 3 {
// return ErrCmdParams
// }
key := args[0]
if len(args[1:])&1 != 0 {
return ErrCmdParams
} else {
args = args[1:]
}
// key := args[0]
// if len(args[1:])&1 != 0 {
// return ErrCmdParams
// } else {
// args = args[1:]
// }
var err error
var offset int32
var val int8
// var err error
// var offset int32
// var val int8
pairs := make([]ledis.BitPair, len(args)>>1)
for i := 0; i < len(pairs); i++ {
offset, err = ledis.StrInt32(args[i<<1], nil)
// pairs := make([]ledis.BitPair, len(args)>>1)
// for i := 0; i < len(pairs); i++ {
// offset, err = ledis.StrInt32(args[i<<1], nil)
if err != nil {
return ErrOffset
}
// if err != nil {
// return ErrOffset
// }
val, err = ledis.StrInt8(args[i<<1+1], nil)
if val != 0 && val != 1 {
return ErrBool
}
// val, err = ledis.StrInt8(args[i<<1+1], nil)
// if val != 0 && val != 1 {
// return ErrBool
// }
if err != nil {
return ErrBool
}
// if err != nil {
// return ErrBool
// }
pairs[i].Pos = offset
pairs[i].Val = uint8(val)
}
// pairs[i].Pos = offset
// pairs[i].Val = uint8(val)
// }
if place, err := c.db.BMSetBit(key, pairs...); err != nil {
return err
} else {
c.resp.writeInteger(place)
}
return nil
}
// if place, err := c.db.BMSetBit(key, pairs...); err != nil {
// return err
// } else {
// c.resp.writeInteger(place)
// }
// return nil
// }
func bcountCommand(c *client) error {
args := c.args
argCnt := len(args)
// func bcountCommand(c *client) error {
// args := c.args
// argCnt := len(args)
if !(argCnt > 0 && argCnt <= 3) {
return ErrCmdParams
}
// if !(argCnt > 0 && argCnt <= 3) {
// return ErrCmdParams
// }
// BCount(key []byte, start int32, end int32) (cnt int32, err error) {
// // BCount(key []byte, start int32, end int32) (cnt int32, err error) {
var err error
var start, end int32 = 0, -1
// var err error
// var start, end int32 = 0, -1
if argCnt > 1 {
start, err = ledis.StrInt32(args[1], nil)
if err != nil {
return ErrValue
}
}
// if argCnt > 1 {
// start, err = ledis.StrInt32(args[1], nil)
// if err != nil {
// return ErrValue
// }
// }
if argCnt > 2 {
end, err = ledis.StrInt32(args[2], nil)
if err != nil {
return ErrValue
}
}
// if argCnt > 2 {
// end, err = ledis.StrInt32(args[2], nil)
// if err != nil {
// return ErrValue
// }
// }
if cnt, err := c.db.BCount(args[0], start, end); err != nil {
return err
} else {
c.resp.writeInteger(int64(cnt))
}
return nil
}
// if cnt, err := c.db.BCount(args[0], start, end); err != nil {
// return err
// } else {
// c.resp.writeInteger(int64(cnt))
// }
// return nil
// }
func boptCommand(c *client) error {
args := c.args
if len(args) < 2 {
return ErrCmdParams
}
// func boptCommand(c *client) error {
// args := c.args
// if len(args) < 2 {
// return ErrCmdParams
// }
opDesc := strings.ToLower(hack.String(args[0]))
dstKey := args[1]
srcKeys := args[2:]
// opDesc := strings.ToLower(hack.String(args[0]))
// dstKey := args[1]
// srcKeys := args[2:]
var op uint8
switch opDesc {
case "and":
op = ledis.OPand
case "or":
op = ledis.OPor
case "xor":
op = ledis.OPxor
case "not":
op = ledis.OPnot
default:
return ErrCmdParams
}
// var op uint8
// switch opDesc {
// case "and":
// op = ledis.OPand
// case "or":
// op = ledis.OPor
// case "xor":
// op = ledis.OPxor
// case "not":
// op = ledis.OPnot
// default:
// return ErrCmdParams
// }
if len(srcKeys) == 0 {
return ErrCmdParams
}
if blen, err := c.db.BOperation(op, dstKey, srcKeys...); err != nil {
return err
} else {
c.resp.writeInteger(int64(blen))
}
return nil
}
// if len(srcKeys) == 0 {
// return ErrCmdParams
// }
// if blen, err := c.db.BOperation(op, dstKey, srcKeys...); err != nil {
// return err
// } else {
// c.resp.writeInteger(int64(blen))
// }
// return nil
// }
func bexpireCommand(c *client) error {
args := c.args
if len(args) != 2 {
return ErrCmdParams
}
// func bexpireCommand(c *client) error {
// args := c.args
// if len(args) != 2 {
// return ErrCmdParams
// }
duration, err := ledis.StrInt64(args[1], nil)
if err != nil {
return ErrValue
}
// duration, err := ledis.StrInt64(args[1], nil)
// if err != nil {
// return ErrValue
// }
if v, err := c.db.BExpire(args[0], duration); err != nil {
return err
} else {
c.resp.writeInteger(v)
}
// if v, err := c.db.BExpire(args[0], duration); err != nil {
// return err
// } else {
// c.resp.writeInteger(v)
// }
return nil
}
// return nil
// }
func bexpireAtCommand(c *client) error {
args := c.args
if len(args) != 2 {
return ErrCmdParams
}
// func bexpireAtCommand(c *client) error {
// args := c.args
// if len(args) != 2 {
// return ErrCmdParams
// }
when, err := ledis.StrInt64(args[1], nil)
if err != nil {
return ErrValue
}
// when, err := ledis.StrInt64(args[1], nil)
// if err != nil {
// return ErrValue
// }
if v, err := c.db.BExpireAt(args[0], when); err != nil {
return err
} else {
c.resp.writeInteger(v)
}
// if v, err := c.db.BExpireAt(args[0], when); err != nil {
// return err
// } else {
// c.resp.writeInteger(v)
// }
return nil
}
// return nil
// }
func bttlCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
// func bttlCommand(c *client) error {
// args := c.args
// if len(args) != 1 {
// return ErrCmdParams
// }
if v, err := c.db.BTTL(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(v)
}
// if v, err := c.db.BTTL(args[0]); err != nil {
// return err
// } else {
// c.resp.writeInteger(v)
// }
return nil
}
// return nil
// }
func bpersistCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
// func bpersistCommand(c *client) error {
// args := c.args
// if len(args) != 1 {
// return ErrCmdParams
// }
if n, err := c.db.BPersist(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(n)
}
// if n, err := c.db.BPersist(args[0]); err != nil {
// return err
// } else {
// c.resp.writeInteger(n)
// }
return nil
}
// return nil
// }
func bxscanCommand(c *client) error {
return xscanGeneric(c, c.db.BScan)
}
func bxrevscanCommand(c *client) error {
return xscanGeneric(c, c.db.BRevScan)
}
func init() {
register("bget", bgetCommand)
register("bdelete", bdeleteCommand)
register("bsetbit", bsetbitCommand)
register("bgetbit", bgetbitCommand)
register("bmsetbit", bmsetbitCommand)
register("bcount", bcountCommand)
register("bopt", boptCommand)
register("bexpire", bexpireCommand)
register("bexpireat", bexpireAtCommand)
register("bttl", bttlCommand)
register("bpersist", bpersistCommand)
register("bxscan", bxscanCommand)
register("bxrevscan", bxrevscanCommand)
register("xbscan", bxscanCommand)
register("xbrevscan", bxrevscanCommand)
}
// func init() {
// register("bget", bgetCommand)
// register("bdelete", bdeleteCommand)
// register("bsetbit", bsetbitCommand)
// register("bgetbit", bgetbitCommand)
// register("bmsetbit", bmsetbitCommand)
// register("bcount", bcountCommand)
// register("bopt", boptCommand)
// register("bexpire", bexpireCommand)
// register("bexpireat", bexpireAtCommand)
// register("bttl", bttlCommand)
// register("bpersist", bpersistCommand)
// }

View File

@ -1,7 +1,7 @@
package server
// import (
// "github.com/siddontang/ledisdb/client/go/ledis"
// "github.com/siddontang/ledisdb/client/goledis"
// "testing"
// )

View File

@ -292,20 +292,12 @@ func hpersistCommand(c *client) error {
return nil
}
func hxscanCommand(c *client) error {
return xscanGeneric(c, c.db.HScan)
}
func hxrevscanCommand(c *client) error {
return xscanGeneric(c, c.db.HRevScan)
}
func xhexistsCommand(c *client) error {
func hkeyexistsCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
if n, err := c.db.XHExists(args[0]); err != nil {
if n, err := c.db.HKeyExists(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(n)
@ -334,9 +326,5 @@ func init() {
register("hexpireat", hexpireAtCommand)
register("httl", httlCommand)
register("hpersist", hpersistCommand)
register("hxscan", hxscanCommand)
register("hxrevscan", hxrevscanCommand)
register("xhscan", hxscanCommand)
register("xhrevscan", hxrevscanCommand)
register("xhexists", xhexistsCommand)
register("hkeyexists", hkeyexistsCommand)
}

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"strconv"
"testing"
)
@ -12,7 +12,7 @@ func TestHash(t *testing.T) {
defer c.Close()
key := []byte("a")
if n, err := ledis.Int(c.Do("xhexists", key)); err != nil {
if n, err := ledis.Int(c.Do("hkeyexists", key)); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
@ -23,7 +23,7 @@ func TestHash(t *testing.T) {
} else if n != 1 {
t.Fatal(n)
}
if n, err := ledis.Int(c.Do("xhexists", key)); err != nil {
if n, err := ledis.Int(c.Do("hkeyexists", key)); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)

View File

@ -1,10 +1,8 @@
package server
import (
"github.com/siddontang/go/hack"
"github.com/siddontang/ledisdb/ledis"
"strconv"
"strings"
)
// func getCommand(c *client) error {
@ -315,82 +313,6 @@ func persistCommand(c *client) error {
return nil
}
func parseScanArgs(c *client) (key []byte, match string, count int, err error) {
args := c.args
count = 10
switch len(args) {
case 0:
key = nil
return
case 1, 3, 5:
key = args[0]
break
default:
err = ErrCmdParams
return
}
if len(args) == 3 {
switch strings.ToLower(hack.String(args[1])) {
case "match":
match = hack.String(args[2])
case "count":
count, err = strconv.Atoi(hack.String(args[2]))
default:
err = ErrCmdParams
return
}
} else if len(args) == 5 {
if strings.ToLower(hack.String(args[1])) != "match" {
err = ErrCmdParams
return
} else if strings.ToLower(hack.String(args[3])) != "count" {
err = ErrCmdParams
return
}
match = hack.String(args[2])
count, err = strconv.Atoi(hack.String(args[4]))
}
if count <= 0 {
err = ErrCmdParams
}
return
}
func xscanGeneric(c *client,
f func(key []byte, count int, inclusive bool, match string) ([][]byte, error)) error {
key, match, count, err := parseScanArgs(c)
if err != nil {
return err
}
if ay, err := f(key, count, false, match); err != nil {
return err
} else {
data := make([]interface{}, 2)
if len(ay) < count {
data[0] = []byte("")
} else {
data[0] = ay[len(ay)-1]
}
data[1] = ay
c.resp.writeArray(data)
}
return nil
}
func xscanCommand(c *client) error {
return xscanGeneric(c, c.db.Scan)
}
func xrevscanCommand(c *client) error {
return xscanGeneric(c, c.db.RevScan)
}
func appendCommand(c *client) error {
args := c.args
if len(args) != 2 {
@ -619,6 +541,4 @@ func init() {
register("expireat", expireAtCommand)
register("ttl", ttlCommand)
register("persist", persistCommand)
register("xscan", xscanCommand)
register("xrevscan", xrevscanCommand)
}

View File

@ -1,7 +1,7 @@
package server
import (
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"testing"
)

View File

@ -231,14 +231,6 @@ func lpersistCommand(c *client) error {
return nil
}
func lxscanCommand(c *client) error {
return xscanGeneric(c, c.db.LScan)
}
func lxrevscanCommand(c *client) error {
return xscanGeneric(c, c.db.LRevScan)
}
func blpopCommand(c *client) error {
keys, timeout, err := lParseBPopArgs(c)
if err != nil {
@ -285,12 +277,13 @@ func lParseBPopArgs(c *client) (keys [][]byte, timeout time.Duration, err error)
keys = args[0 : len(args)-1]
return
}
func xlexistsCommand(c *client) error {
func lkeyexistsCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
if n, err := c.db.XLExists(args[0]); err != nil {
if n, err := c.db.LKeyExists(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(n)
@ -317,9 +310,5 @@ func init() {
register("lexpireat", lexpireAtCommand)
register("lttl", lttlCommand)
register("lpersist", lpersistCommand)
register("lxscan", lxscanCommand)
register("lxrevscan", lxrevscanCommand)
register("xlscan", lxscanCommand)
register("xlrevscan", lxrevscanCommand)
register("xlexists", xlexistsCommand)
register("lkeyexists", lkeyexistsCommand)
}

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"strconv"
"testing"
)
@ -58,7 +58,7 @@ func TestList(t *testing.T) {
defer c.Close()
key := []byte("a")
if n, err := ledis.Int(c.Do("xlexists", key)); err != nil {
if n, err := ledis.Int(c.Do("lkeyexists", key)); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
@ -70,7 +70,7 @@ func TestList(t *testing.T) {
t.Fatal(n)
}
if n, err := ledis.Int(c.Do("xlexists", key)); err != nil {
if n, err := ledis.Int(c.Do("lkeyexists", key)); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(1)

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
goledis "github.com/siddontang/ledisdb/client/go/ledis"
goledis "github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/ledis"
"strings"
"time"
@ -165,15 +165,15 @@ func xttl(db *ledis.DB, tp string, key []byte) (int64, error) {
func xscan(db *ledis.DB, tp string, count int) ([][]byte, error) {
switch strings.ToUpper(tp) {
case "KV":
return db.Scan(nil, count, false, "")
return db.Scan(KV, nil, count, false, "")
case "HASH":
return db.HScan(nil, count, false, "")
return db.Scan(HASH, nil, count, false, "")
case "LIST":
return db.LScan(nil, count, false, "")
return db.Scan(LIST, nil, count, false, "")
case "SET":
return db.SScan(nil, count, false, "")
return db.Scan(SET, nil, count, false, "")
case "ZSET":
return db.ZScan(nil, count, false, "")
return db.Scan(ZSET, nil, count, false, "")
default:
return nil, fmt.Errorf("invalid key type %s", tp)
}
@ -239,8 +239,8 @@ func xmigratedbCommand(c *client) error {
db, err := ledis.StrUint64(args[4], nil)
if err != nil {
return err
} else if db >= uint64(ledis.MaxDBNumber) {
return fmt.Errorf("invalid db index %d, must < %d", db, ledis.MaxDBNumber)
} else if db >= uint64(c.app.cfg.Databases) {
return fmt.Errorf("invalid db index %d, must < %d", db, c.app.cfg.Databases)
}
timeout, err := ledis.StrInt64(args[5], nil)
@ -266,11 +266,13 @@ func xmigratedbCommand(c *client) error {
mc := c.app.getMigrateClient(addr)
conn := mc.Get()
conn, err := mc.Get()
if err != nil {
return err
}
//timeout is in milliseconds
t := time.Duration(timeout) * time.Millisecond
conn.SetConnectTimeout(t)
if _, err = conn.Do("select", db); err != nil {
return err
@ -326,8 +328,8 @@ func xmigrateCommand(c *client) error {
db, err := ledis.StrUint64(args[4], nil)
if err != nil {
return err
} else if db >= uint64(ledis.MaxDBNumber) {
return fmt.Errorf("invalid db index %d, must < %d", db, ledis.MaxDBNumber)
} else if db >= uint64(c.app.cfg.Databases) {
return fmt.Errorf("invalid db index %d, must < %d", db, c.app.cfg.Databases)
}
timeout, err := ledis.StrInt64(args[5], nil)
@ -358,11 +360,13 @@ func xmigrateCommand(c *client) error {
mc := c.app.getMigrateClient(addr)
conn := mc.Get()
conn, err := mc.Get()
if err != nil {
return err
}
//timeout is in milliseconds
t := time.Duration(timeout) * time.Millisecond
conn.SetConnectTimeout(t)
if _, err = conn.Do("select", db); err != nil {
return err

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/config"
"os"
"testing"
@ -80,10 +80,10 @@ func TestMigrate(t *testing.T) {
time.Sleep(1 * time.Second)
c1 := ledis.NewConn(s1Cfg.Addr)
c1, _ := ledis.Connect(s1Cfg.Addr)
defer c1.Close()
c2 := ledis.NewConn(s2Cfg.Addr)
c2, _ := ledis.Connect(s2Cfg.Addr)
defer c2.Close()
if _, err = c1.Do("set", "a", "1"); err != nil {

View File

@ -131,7 +131,7 @@ func syncCommand(c *client) error {
c.syncBuf.Write(dummyBuf)
if _, _, err := c.app.ldb.ReadLogsToTimeout(logId, &c.syncBuf, 30, c.app.quit); err != nil {
if _, _, err := c.app.ldb.ReadLogsToTimeout(logId, &c.syncBuf, 1, c.app.quit); err != nil {
return err
} else {
buf := c.syncBuf.Bytes()
@ -157,6 +157,10 @@ func replconfCommand(c *client) error {
return ErrCmdParams
}
if !c.app.ldb.ReplicationUsed() {
return ledis.ErrRplNotSupport
}
//now only support "listening-port"
for i := 0; i < len(args); i += 2 {
switch strings.ToLower(hack.String(args[i])) {
@ -188,9 +192,11 @@ func roleCommand(c *client) error {
}
c.app.m.Lock()
isMaster := len(c.app.cfg.SlaveOf) == 0
slaveof := c.app.cfg.SlaveOf
c.app.m.Unlock()
isMaster := len(slaveof) == 0
ay := make([]interface{}, 0, 5)
var lastId int64 = 0
@ -217,7 +223,7 @@ func roleCommand(c *client) error {
c.app.slock.Unlock()
ay = append(ay, items)
} else {
host, port, _ := splitHostPort(c.app.cfg.Addr)
host, port, _ := splitHostPort(slaveof)
ay = append(ay, []byte("slave"))
ay = append(ay, []byte(host))
ay = append(ay, int64(port))

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
goledis "github.com/siddontang/ledisdb/client/go/ledis"
goledis "github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/config"
"os"
"reflect"
@ -14,8 +14,8 @@ func checkDataEqual(master *App, slave *App) error {
mdb, _ := master.ldb.Select(0)
sdb, _ := slave.ldb.Select(0)
mkeys, _ := mdb.Scan(nil, 100, true, "")
skeys, _ := sdb.Scan(nil, 100, true, "")
mkeys, _ := mdb.Scan(KV, nil, 100, true, "")
skeys, _ := sdb.Scan(KV, nil, 100, true, "")
if len(mkeys) != len(skeys) {
return fmt.Errorf("keys number not equal %d != %d", len(mkeys), len(skeys))
@ -140,7 +140,7 @@ func TestReplication(t *testing.T) {
if err = checkTestRole(slaveCfg.Addr, []interface{}{
[]byte("slave"),
[]byte("127.0.0.1"),
int64(11183),
int64(11182),
[]byte("connected"),
int64(sStat.LastID),
}); err != nil {
@ -159,7 +159,7 @@ func TestReplication(t *testing.T) {
}
func checkTestRole(addr string, checkRoles []interface{}) error {
conn := goledis.NewConn(addr)
conn, _ := goledis.Connect(addr)
defer conn.Close()
roles, err := goledis.MultiBulk(conn.Do("ROLE"))
if err != nil {

server/cmd_scan.go (new file, 215 lines)
View File

@ -0,0 +1,215 @@
package server
import (
"fmt"
"github.com/siddontang/go/hack"
"github.com/siddontang/go/num"
"github.com/siddontang/ledisdb/ledis"
"strconv"
"strings"
)
func parseScanArgs(args [][]byte) (cursor []byte, match string, count int, err error) {
cursor = args[0]
args = args[1:]
count = 10
for i := 0; i < len(args); {
switch strings.ToUpper(hack.String(args[i])) {
case "MATCH":
if i+1 >= len(args) {
err = ErrCmdParams
return
}
match = hack.String(args[i+1])
i = i + 2
case "COUNT":
if i+1 >= len(args) {
err = ErrCmdParams
return
}
count, err = strconv.Atoi(hack.String(args[i+1]))
if err != nil {
return
}
i = i + 2
default:
err = fmt.Errorf("invalid argument %s", args[i])
return
}
}
return
}
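A quick worked example of the argument shape this parser accepts (values are hypothetical):

func exampleParseScanArgs() {
	args := [][]byte{[]byte("10"), []byte("MATCH"), []byte("a.*"), []byte("COUNT"), []byte("20")}
	cursor, match, count, err := parseScanArgs(args)
	// cursor == []byte("10"), match == "a.*", count == 20, err == nil
	_, _, _, _ = cursor, match, count, err
}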
// XSCAN type cursor [MATCH match] [COUNT count]
func xscanCommand(c *client) error {
args := c.args
if len(args) < 2 {
return ErrCmdParams
}
var dataType ledis.DataType
switch strings.ToUpper(hack.String(args[0])) {
case "KV":
dataType = ledis.KV
case "HASH":
dataType = ledis.HASH
case "LIST":
dataType = ledis.LIST
case "SET":
dataType = ledis.SET
case "ZSET":
dataType = ledis.ZSET
default:
return fmt.Errorf("invalid key type %s", args[0])
}
cursor, match, count, err := parseScanArgs(args[1:])
if err != nil {
return err
}
ay, err := c.db.Scan(dataType, cursor, count, false, match)
if err != nil {
return err
}
data := make([]interface{}, 2)
if len(ay) < count {
data[0] = []byte("")
} else {
data[0] = ay[len(ay)-1]
}
data[1] = ay
c.resp.writeArray(data)
return nil
}
// XHSCAN key cursor [MATCH match] [COUNT count]
func xhscanCommand(c *client) error {
args := c.args
if len(args) < 2 {
return ErrCmdParams
}
key := args[0]
cursor, match, count, err := parseScanArgs(args[1:])
if err != nil {
return err
}
ay, err := c.db.HScan(key, cursor, count, false, match)
if err != nil {
return err
}
data := make([]interface{}, 2)
if len(ay) < count {
data[0] = []byte("")
} else {
data[0] = ay[len(ay)-1].Field
}
vv := make([][]byte, 0, len(ay)*2)
for _, v := range ay {
vv = append(vv, v.Field, v.Value)
}
data[1] = vv
c.resp.writeArray(data)
return nil
}
// XSSCAN key cursor [MATCH match] [COUNT count]
func xsscanCommand(c *client) error {
args := c.args
if len(args) < 2 {
return ErrCmdParams
}
key := args[0]
cursor, match, count, err := parseScanArgs(args[1:])
if err != nil {
return err
}
ay, err := c.db.SScan(key, cursor, count, false, match)
if err != nil {
return err
}
data := make([]interface{}, 2)
if len(ay) < count {
data[0] = []byte("")
} else {
data[0] = ay[len(ay)-1]
}
data[1] = ay
c.resp.writeArray(data)
return nil
}
// XZSCAN key cursor [MATCH match] [COUNT count]
func xzscanCommand(c *client) error {
args := c.args
if len(args) < 2 {
return ErrCmdParams
}
key := args[0]
cursor, match, count, err := parseScanArgs(args[1:])
if err != nil {
return err
}
ay, err := c.db.ZScan(key, cursor, count, false, match)
if err != nil {
return err
}
data := make([]interface{}, 2)
if len(ay) < count {
data[0] = []byte("")
} else {
data[0] = ay[len(ay)-1].Member
}
vv := make([][]byte, 0, len(ay)*2)
for _, v := range ay {
vv = append(vv, v.Member, num.FormatInt64ToSlice(v.Score))
}
data[1] = vv
c.resp.writeArray(data)
return nil
}
func init() {
register("xscan", xscanCommand)
register("xhscan", xhscanCommand)
register("xsscan", xsscanCommand)
register("xzscan", xzscanCommand)
}
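A rough client-side sketch of the reply shape these handlers produce, written against the goledis client the server tests use (the conn, key and field names are assumptions):

func exampleXHScanReply(c *ledis.Conn) error {
	ay, err := ledis.Values(c.Do("XHSCAN", "myhash", "", "COUNT", 10))
	if err != nil {
		return err
	}
	// ay[0] is the next cursor ("" once the hash is exhausted);
	// ay[1] is a flat [field1, value1, field2, value2, ...] array.
	_ = ay
	return nil
}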

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/config"
"os"
"testing"
@ -29,14 +29,14 @@ func TestScan(t *testing.T) {
defer c.Close()
testKVScan(t, c)
testHashScan(t, c)
testListScan(t, c)
testZSetScan(t, c)
testSetScan(t, c)
testHashKeyScan(t, c)
testListKeyScan(t, c)
testZSetKeyScan(t, c)
testSetKeyScan(t, c)
}
func checkScanValues(t *testing.T, ay interface{}, values ...int) {
func checkScanValues(t *testing.T, ay interface{}, values ...interface{}) {
a, err := ledis.Strings(ay, nil)
if err != nil {
t.Fatal(err)
@ -47,14 +47,14 @@ func checkScanValues(t *testing.T, ay interface{}, values ...int) {
}
for i, v := range a {
if string(v) != fmt.Sprintf("%d", values[i]) {
t.Fatal(fmt.Sprintf("%d %s != %d", string(v), values[i]))
if string(v) != fmt.Sprintf("%v", values[i]) {
t.Fatal(fmt.Sprintf("%d: %s != %v", i, string(v), values[i]))
}
}
}
func checkScan(t *testing.T, c *ledis.Client, cmd string) {
if ay, err := ledis.Values(c.Do(cmd, "", "count", 5)); err != nil {
func checkScan(t *testing.T, c *ledis.Client, tp string) {
if ay, err := ledis.Values(c.Do("XSCAN", tp, "", "count", 5)); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
@ -64,7 +64,7 @@ func checkScan(t *testing.T, c *ledis.Client, cmd string) {
checkScanValues(t, ay[1], 0, 1, 2, 3, 4)
}
if ay, err := ledis.Values(c.Do(cmd, "4", "count", 6)); err != nil {
if ay, err := ledis.Values(c.Do("XSCAN", tp, "4", "count", 6)); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
@ -76,29 +76,6 @@ func checkScan(t *testing.T, c *ledis.Client, cmd string) {
}
func checkRevScan(t *testing.T, c *ledis.Client, cmd string) {
if ay, err := ledis.Values(c.Do(cmd, "", "count", 5)); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
} else if n := ay[0].([]byte); string(n) != "5" {
t.Fatal(string(n))
} else {
checkScanValues(t, ay[1], 9, 8, 7, 6, 5)
}
if ay, err := ledis.Values(c.Do(cmd, "5", "count", 6)); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
} else if n := ay[0].([]byte); string(n) != "" {
t.Fatal(string(n))
} else {
checkScanValues(t, ay[1], 4, 3, 2, 1, 0)
}
}
func testKVScan(t *testing.T, c *ledis.Client) {
for i := 0; i < 10; i++ {
if _, err := c.Do("set", fmt.Sprintf("%d", i), []byte("value")); err != nil {
@ -106,50 +83,95 @@ func testKVScan(t *testing.T, c *ledis.Client) {
}
}
checkScan(t, c, "xscan")
checkRevScan(t, c, "xrevscan")
checkScan(t, c, "KV")
}
func testHashScan(t *testing.T, c *ledis.Client) {
func testHashKeyScan(t *testing.T, c *ledis.Client) {
for i := 0; i < 10; i++ {
if _, err := c.Do("hset", fmt.Sprintf("%d", i), fmt.Sprintf("%d", i), []byte("value")); err != nil {
t.Fatal(err)
}
}
checkScan(t, c, "xhscan")
checkRevScan(t, c, "xhrevscan")
checkScan(t, c, "HASH")
}
func testListScan(t *testing.T, c *ledis.Client) {
func testListKeyScan(t *testing.T, c *ledis.Client) {
for i := 0; i < 10; i++ {
if _, err := c.Do("lpush", fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil {
t.Fatal(err)
}
}
checkScan(t, c, "xlscan")
checkRevScan(t, c, "xlrevscan")
checkScan(t, c, "LIST")
}
func testZSetScan(t *testing.T, c *ledis.Client) {
func testZSetKeyScan(t *testing.T, c *ledis.Client) {
for i := 0; i < 10; i++ {
if _, err := c.Do("zadd", fmt.Sprintf("%d", i), i, []byte("value")); err != nil {
t.Fatal(err)
}
}
checkScan(t, c, "zxscan")
checkRevScan(t, c, "zxrevscan")
checkScan(t, c, "ZSET")
}
func testSetScan(t *testing.T, c *ledis.Client) {
func testSetKeyScan(t *testing.T, c *ledis.Client) {
for i := 0; i < 10; i++ {
if _, err := c.Do("sadd", fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)); err != nil {
t.Fatal(err)
}
}
checkScan(t, c, "xsscan")
checkRevScan(t, c, "xsrevscan")
checkScan(t, c, "SET")
}
func TestHashScan(t *testing.T) {
c := getTestConn()
defer c.Close()
key := "scan_hash"
c.Do("HMSET", key, "a", 1, "b", 2)
if ay, err := ledis.Values(c.Do("XHSCAN", key, "")); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
} else {
checkScanValues(t, ay[1], "a", 1, "b", 2)
}
}
func TestSetScan(t *testing.T) {
c := getTestConn()
defer c.Close()
key := "scan_set"
c.Do("SADD", key, "a", "b")
if ay, err := ledis.Values(c.Do("XSSCAN", key, "")); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
} else {
checkScanValues(t, ay[1], "a", "b")
}
}
func TestZSetScan(t *testing.T) {
c := getTestConn()
defer c.Close()
key := "scan_zset"
c.Do("ZADD", key, 1, "a", 2, "b")
if ay, err := ledis.Values(c.Do("XZSCAN", key, "")); err != nil {
t.Fatal(err)
} else if len(ay) != 2 {
t.Fatal(len(ay))
} else {
checkScanValues(t, ay[1], "a", 1, "b", 2)
}
}

View File

@ -4,7 +4,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"reflect"
"testing"
)

View File

@ -111,6 +111,23 @@ func timeCommand(c *client) error {
return nil
}
func configGetCommand(c *client) error {
args := c.args
if len(args) != 2 {
return ErrCmdParams
}
ay := make([][]byte, 0, 2)
key := hack.String(args[1])
switch key {
case "databases":
ay = append(ay, []byte("databases"), num.FormatUint8ToSlice(c.app.cfg.Databases))
}
c.resp.writeSliceArray(ay)
return nil
}
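// Illustrative only: a client sending "CONFIG GET databases" should get back a
// two-element array ["databases", "<configured database count>"]; any other key
// currently falls through and yields an empty array.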
func configCommand(c *client) error {
if len(c.args) < 1 {
return ErrCmdParams
@ -124,6 +141,8 @@ func configCommand(c *client) error {
c.resp.writeStatus(OK)
return nil
}
case "get":
return configGetCommand(c)
default:
return ErrCmdParams
}

View File

@ -262,20 +262,12 @@ func spersistCommand(c *client) error {
return nil
}
func sxscanCommand(c *client) error {
return xscanGeneric(c, c.db.SScan)
}
func sxrevscanCommand(c *client) error {
return xscanGeneric(c, c.db.SRevScan)
}
func xsexistsCommand(c *client) error {
func skeyexistsCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
if n, err := c.db.XSExists(args[0]); err != nil {
if n, err := c.db.SKeyExists(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(n)
@ -295,16 +287,13 @@ func init() {
register("srem", sremCommand)
register("sunion", sunionCommand)
register("sunionstore", sunionstoreCommand)
register("sclear", sclearCommand)
register("smclear", smclearCommand)
register("sexpire", sexpireCommand)
register("sexpireat", sexpireAtCommand)
register("sttl", sttlCommand)
register("spersist", spersistCommand)
register("sxscan", sxscanCommand)
register("sxrevscan", sxrevscanCommand)
register("xsscan", sxscanCommand)
register("xsrevscan", sxrevscanCommand)
register("xsexists", xsexistsCommand)
register("skeyexists", skeyexistsCommand)
}

View File

@ -1,7 +1,7 @@
package server
import (
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"testing"
)
@ -12,7 +12,7 @@ func TestSet(t *testing.T) {
key1 := "testdb_cmd_set_1"
key2 := "testdb_cmd_set_2"
if n, err := ledis.Int(c.Do("xsexists", key1)); err != nil {
if n, err := ledis.Int(c.Do("skeyexists", key1)); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
@ -24,7 +24,7 @@ func TestSet(t *testing.T) {
t.Fatal(n)
}
if n, err := ledis.Int(c.Do("xsexists", key1)); err != nil {
if n, err := ledis.Int(c.Do("skeyexists", key1)); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
goledis "github.com/siddontang/ledisdb/client/go/ledis"
goledis "github.com/siddontang/ledisdb/client/goledis"
"testing"
)

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"testing"
"time"
)

View File

@ -641,14 +641,6 @@ func zinterstoreCommand(c *client) error {
return err
}
func zxscanCommand(c *client) error {
return xscanGeneric(c, c.db.ZScan)
}
func zxrevscanCommand(c *client) error {
return xscanGeneric(c, c.db.ZRevScan)
}
func zparseMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, rangeType uint8, err error) {
rangeType = store.RangeClose
if strings.ToLower(hack.String(minBuf)) == "-" {
@ -771,12 +763,12 @@ func zlexcountCommand(c *client) error {
return nil
}
func xzexistsCommand(c *client) error {
func zkeyexistsCommand(c *client) error {
args := c.args
if len(args) != 1 {
return ErrCmdParams
}
if n, err := c.db.XZExists(args[0]); err != nil {
if n, err := c.db.ZKeyExists(args[0]); err != nil {
return err
} else {
c.resp.writeInteger(n)
@ -815,9 +807,5 @@ func init() {
register("zexpireat", zexpireAtCommand)
register("zttl", zttlCommand)
register("zpersist", zpersistCommand)
register("zxscan", zxscanCommand)
register("zxrevscan", zxrevscanCommand)
register("xzscan", zxscanCommand)
register("xzrevscan", zxrevscanCommand)
register("xzexists", xzexistsCommand)
register("zkeyexists", zkeyexistsCommand)
}

View File

@ -2,7 +2,7 @@ package server
import (
"fmt"
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/client/goledis"
"reflect"
"strconv"
"testing"
@ -14,7 +14,7 @@ func TestZSet(t *testing.T) {
key := []byte("myzset")
if n, err := ledis.Int(c.Do("xzexists", key)); err != nil {
if n, err := ledis.Int(c.Do("zkeyexists", key)); err != nil {
t.Fatal(err)
} else if n != 0 {
t.Fatal(n)
@ -26,7 +26,7 @@ func TestZSet(t *testing.T) {
t.Fatal(n)
}
if n, err := ledis.Int(c.Do("xzexists", key)); err != nil {
if n, err := ledis.Int(c.Do("zkeyexists", key)); err != nil {
t.Fatal(err)
} else if n != 1 {
t.Fatal(n)

View File

@ -2,6 +2,7 @@ package server
import (
"errors"
"github.com/siddontang/ledisdb/ledis"
)
var (
@ -26,12 +27,11 @@ var (
)
const (
KV = iota
LIST
HASH
SET
ZSET
BIT
KV ledis.DataType = ledis.KV
LIST = ledis.LIST
HASH = ledis.HASH
SET = ledis.SET
ZSET = ledis.ZSET
)
const (

View File

@ -205,6 +205,18 @@ func (i *info) dumpReplication(buf *bytes.Buffer) {
}
i.app.slock.Unlock()
i.app.m.Lock()
slaveof := i.app.cfg.SlaveOf
i.app.m.Unlock()
isSlave := len(slaveof) > 0
if isSlave {
p = append(p, infoPair{"role", "slave"})
} else {
p = append(p, infoPair{"role", "master"})
}
num := i.Replication.PubLogNum.Get()
p = append(p, infoPair{"pub_log_num", num})
@ -216,13 +228,14 @@ func (i *info) dumpReplication(buf *bytes.Buffer) {
p = append(p, infoPair{"pub_log_ack_per_time", 0})
}
p = append(p, infoPair{"slaveof", i.app.cfg.SlaveOf})
p = append(p, infoPair{"slaveof", slaveof})
if len(slaves) > 0 {
p = append(p, infoPair{"slaves", strings.Join(slaves, ",")})
}
if s, _ := i.app.ldb.ReplicationStat(); s != nil {
s, _ := i.app.ldb.ReplicationStat()
if s != nil {
p = append(p, infoPair{"last_log_id", s.LastID})
p = append(p, infoPair{"first_log_id", s.FirstID})
p = append(p, infoPair{"commit_log_id", s.CommitID})
@ -234,6 +247,28 @@ func (i *info) dumpReplication(buf *bytes.Buffer) {
p = append(p, infoPair{"master_last_log_id", i.Replication.MasterLastLogID.Get()})
if isSlave {
// add some Redis-style slave replication info for an external failover service :-)
state := i.app.m.state.Get()
if state == replSyncState || state == replConnectedState {
p = append(p, infoPair{"master_link_status", "up"})
} else {
p = append(p, infoPair{"master_link_status", "down"})
}
// here, all slaves have the same priority for now
p = append(p, infoPair{"slave_priority", 100})
if s != nil {
if s.LastID > 0 {
p = append(p, infoPair{"slave_repl_offset", s.LastID})
} else {
p = append(p, infoPair{"slave_repl_offset", s.CommitID})
}
} else {
p = append(p, infoPair{"slave_repl_offset", 0})
}
}
i.dumpPairs(buf, p...)
}
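
A rough sketch of reading the replication section built above through the Go client. It assumes the INFO command accepts a section argument the way Redis does, and the address is a placeholder; the reply is simply printed so the new role, master_link_status, slave_priority and slave_repl_offset pairs are visible:

package main

import (
	"fmt"

	"github.com/siddontang/ledisdb/client/goledis"
)

func main() {
	// Placeholder address; assumes a running ledisdb server.
	c, err := ledis.Connect("127.0.0.1:6380")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// On a slave this section should now include role, master_link_status,
	// slave_priority and slave_repl_offset alongside the existing pairs.
	r, err := c.Do("info", "replication")
	if err != nil {
		panic(err)
	}

	switch v := r.(type) {
	case []byte:
		fmt.Println(string(v))
	case string:
		fmt.Println(v)
	default:
		fmt.Printf("%v\n", v)
	}
}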

View File

@ -7,7 +7,7 @@ import (
"github.com/siddontang/go/log"
"github.com/siddontang/go/num"
"github.com/siddontang/go/sync2"
goledis "github.com/siddontang/ledisdb/client/go/ledis"
goledis "github.com/siddontang/ledisdb/client/goledis"
"github.com/siddontang/ledisdb/ledis"
"github.com/siddontang/ledisdb/rpl"
"net"
@ -34,10 +34,22 @@ const (
replConnectedState
)
type syncBuffer struct {
m *master
bytes.Buffer
}
func (b *syncBuffer) Write(data []byte) (int, error) {
b.m.state.Set(replSyncState)
n, err := b.Buffer.Write(data)
return n, err
}
type master struct {
sync.Mutex
conn *goledis.Conn
connLock sync.Mutex
conn *goledis.Conn
app *App
@ -47,7 +59,7 @@ type master struct {
wg sync.WaitGroup
syncBuf bytes.Buffer
syncBuf syncBuffer
state sync2.AtomicInt32
}
@ -57,6 +69,7 @@ func newMaster(app *App) *master {
m.app = app
m.quit = make(chan struct{}, 1)
m.syncBuf = syncBuffer{m: m}
m.state.Set(replConnectState)
@ -64,41 +77,45 @@ func newMaster(app *App) *master {
}
func (m *master) Close() {
m.quit <- struct{}{}
m.state.Set(replConnectState)
if !m.isQuited() {
close(m.quit)
}
m.closeConn()
m.wg.Wait()
select {
case <-m.quit:
default:
}
m.state.Set(replConnectState)
}
func (m *master) resetConn() error {
if len(m.addr) == 0 {
return fmt.Errorf("no assign master addr")
}
if m.conn != nil {
m.conn.Close()
}
m.conn = goledis.NewConn(m.addr)
return nil
}
func (m *master) closeConn() {
m.connLock.Lock()
defer m.connLock.Unlock()
if m.conn != nil {
//for replication, we send a quit command to close the connection gracefully
m.conn.Send("quit")
m.conn.SetReadDeadline(time.Now().Add(1 * time.Second))
m.conn.Close()
}
m.conn = nil
}
func (m *master) checkConn() error {
m.connLock.Lock()
defer m.connLock.Unlock()
var err error
if m.conn == nil {
m.conn, err = goledis.Connect(m.addr)
} else {
if _, err = m.conn.Do("PING"); err != nil {
m.conn.Close()
m.conn = nil
}
}
return err
}
func (m *master) stopReplication() error {
@ -115,60 +132,88 @@ func (m *master) startReplication(masterAddr string, restart bool) error {
m.app.cfg.SetReadonly(true)
m.quit = make(chan struct{}, 1)
if len(m.addr) == 0 {
return fmt.Errorf("no assign master addr")
}
m.wg.Add(1)
go m.runReplication(restart)
return nil
}
func (m *master) isQuited() bool {
select {
case <-m.quit:
return true
default:
return false
}
}
func (m *master) runReplication(restart bool) {
defer func() {
m.state.Set(replConnectState)
m.wg.Done()
}()
m.state.Set(replConnectingState)
if err := m.resetConn(); err != nil {
log.Errorf("reset conn error %s", err.Error())
return
}
for {
select {
case <-m.quit:
m.state.Set(replConnectState)
if m.isQuited() {
return
default:
if _, err := m.conn.Do("ping"); err != nil {
log.Errorf("ping master %s error %s, try 2s later", m.addr, err.Error())
time.Sleep(2 * time.Second)
continue
}
if err := m.checkConn(); err != nil {
log.Errorf("check master %s connection error %s, try 3s later", m.addr, err.Error())
select {
case <-time.After(3 * time.Second):
case <-m.quit:
return
}
continue
}
if m.isQuited() {
return
}
m.state.Set(replConnectedState)
if err := m.replConf(); err != nil {
log.Errorf("replconf error %s", err.Error())
return
if strings.Contains(err.Error(), ledis.ErrRplNotSupport.Error()) {
log.Errorf("master doesn't support replication, wait 10s and retry")
select {
case <-time.After(10 * time.Second):
case <-m.quit:
return
}
} else {
log.Errorf("replconf error %s", err.Error())
}
continue
}
if restart {
m.state.Set(replSyncState)
if err := m.fullSync(); err != nil {
log.Errorf("restart fullsync error %s", err.Error())
return
continue
}
m.state.Set(replConnectedState)
}
for {
select {
case <-m.quit:
if err := m.sync(); err != nil {
log.Errorf("sync error %s", err.Error())
break
}
m.state.Set(replConnectedState)
if m.isQuited() {
return
default:
m.state.Set(replConnectedState)
if err := m.sync(); err != nil {
log.Errorf("sync error %s", err.Error())
return
}
}
}
}
@ -198,6 +243,8 @@ func (m *master) fullSync() error {
return err
}
m.state.Set(replSyncState)
dumpPath := path.Join(m.app.cfg.DataDir, "master.dump")
f, err := os.OpenFile(dumpPath, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
@ -245,20 +292,20 @@ func (m *master) sync() error {
return err
}
m.state.Set(replConnectedState)
m.syncBuf.Reset()
if err = m.conn.ReceiveBulkTo(&m.syncBuf); err != nil {
switch err.Error() {
case ledis.ErrLogMissed.Error():
if strings.Contains(err.Error(), ledis.ErrLogMissed.Error()) {
return m.fullSync()
case ledis.ErrRplNotSupport.Error():
m.stopReplication()
return nil
default:
} else {
return err
}
}
m.state.Set(replConnectedState)
buf := m.syncBuf.Bytes()
if len(buf) < 8 {
@ -276,7 +323,6 @@ func (m *master) sync() error {
return nil
}
m.state.Set(replSyncState)
if err = m.app.ldb.StoreLogsFromData(buf); err != nil {
return err
}
@ -302,6 +348,7 @@ func (app *App) slaveof(masterAddr string, restart bool, readonly bool) error {
app.cfg.SlaveOf = masterAddr
if len(masterAddr) == 0 {
log.Infof("slaveof no one, stop replication")
if err := app.m.stopReplication(); err != nil {
return err
}
@ -347,9 +394,7 @@ func (app *App) removeSlave(c *client, activeQuit bool) {
if _, ok := app.slaves[addr]; ok {
delete(app.slaves, addr)
log.Infof("remove slave %s", addr)
if activeQuit {
asyncNotifyUint64(app.slaveSyncAck, c.lastLogID.Get())
}
asyncNotifyUint64(app.slaveSyncAck, c.lastLogID.Get())
}
}
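
The reconnect loop above replaces bare time.Sleep calls with waits that can be cut short by the quit channel closed in Close(). A self-contained sketch of that pattern; the names here are illustrative, not from the patch:

package main

import (
	"fmt"
	"time"
)

// waitOrQuit blocks for d but returns false early if quit is closed,
// mirroring the select on time.After / <-m.quit used in runReplication.
func waitOrQuit(d time.Duration, quit chan struct{}) bool {
	select {
	case <-time.After(d):
		return true
	case <-quit:
		return false
	}
}

func main() {
	quit := make(chan struct{})

	// Simulate Close(): closing the channel wakes every pending wait at once,
	// which is why the loop rechecks it with a non-blocking receive (isQuited).
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(quit)
	}()

	for i := 0; i < 5; i++ {
		if !waitOrQuit(3*time.Second, quit) {
			fmt.Println("quit requested, stopping retry loop")
			return
		}
		fmt.Println("retrying...")
	}
}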

View File

@ -45,7 +45,7 @@ func main() {
wb := db.NewWriteBatch()
for i := uint8(0); i < ledis.MaxDBNumber; i++ {
for i := uint8(0); i < cfg.Databases; i++ {
minK, maxK := oldKeyPair(i)
it := db.RangeIterator(minK, maxK, store.RangeROpen)