fix vet and lint (#337)

siddontang 2018-03-29 20:33:36 +08:00 committed by GitHub
parent 36de957bf3
commit 0c604eb684
24 changed files with 589 additions and 418 deletions

View File

@@ -199,7 +199,7 @@ func loadHisotry() {
 func saveHisotry() {
 	if f, err := os.Create(historyPath); err != nil {
-		fmt.Printf("Error writing history file: ", err)
+		fmt.Printf("Error writing history file, err: %v", err)
 	} else {
 		line.WriteHistory(f)
 		f.Close()
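The Printf change above is a typical go vet fix: the old call passed err as an extra argument with no formatting verb, so vet reports that the call has arguments but no formatting directives. A minimal, self-contained sketch of the pattern (illustrative only, not part of this commit):

package main

import "fmt"

func main() {
	err := fmt.Errorf("disk full")
	// Before: fmt.Printf("Error writing history file: ", err)
	// go vet: "Printf call has arguments but no formatting directives"
	// After: the error value is consumed by the %v verb.
	fmt.Printf("Error writing history file, err: %v\n", err)
}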

View File

@@ -4,8 +4,10 @@ import (
 	"errors"
 )
+// Version is for version
 const Version = "0.5"
+// DataType is defined for the different types
 type DataType byte
 // for out use
@@ -34,6 +36,7 @@ func (d DataType) String() string {
 	}
 }
+// For different type name
 const (
 	KVName = "KV"
 	ListName = "LIST"
@@ -71,8 +74,8 @@ const (
 	MetaType byte = 201
 )
-var (
-	TypeName = map[byte]string{
+// TypeName is the map of type -> name
+var TypeName = map[byte]string{
 	KVType: "kv",
 	HashType: "hash",
 	HSizeType: "hsize",
@@ -87,8 +90,7 @@ var (
 	SSizeType: "ssize",
 	ExpTimeType: "exptime",
 	ExpMetaType: "expmeta",
 }
-)
 const (
 	defaultScanCount int = 10
@@ -104,25 +106,28 @@ var (
 	errListIndex = errors.New("invalid list index")
 )
+// For different const size configuration
 const (
+	// max allowed databases
 	MaxDatabases int = 10240
-	//max key size
+	// max key size
 	MaxKeySize int = 1024
-	//max hash field size
+	// max hash field size
 	MaxHashFieldSize int = 1024
-	//max zset member size
+	// max zset member size
 	MaxZSetMemberSize int = 1024
-	//max set member size
+	// max set member size
 	MaxSetMemberSize int = 1024
-	//max value size
+	// max value size
 	MaxValueSize int = 1024 * 1024 * 1024
 )
+// For different common errors
 var (
 	ErrScoreMiss = errors.New("zset score miss")
 	ErrWriteInROnly = errors.New("write not support in readonly mode")
@@ -136,6 +141,7 @@ var (
 	// DBInMulti uint8 = 0x2
 	// )
+// For bit operation
 const (
 	BitAND = "and"
 	BitOR = "or"
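The const.go changes follow the usual golint conventions: exported identifiers and declaration blocks get doc comments, and a var block that holds only one declaration is unwrapped so the comment can sit directly on it (as with TypeName above). A small sketch of the shape, with placeholder byte values since the real constants are defined elsewhere in the package:

package lintdemo // illustrative only, not the ledisdb source

// KVType and HashType stand in for the data-type constants above;
// the byte values here are placeholders.
const (
	KVType byte = iota + 1
	HashType
)

// TypeName is the map of type -> name; golint asks that the comment
// on an exported identifier start with the identifier's name.
var TypeName = map[byte]string{
	KVType:   "kv",
	HashType: "hash",
}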

View File

@@ -11,26 +11,22 @@ import (
 	"github.com/siddontang/ledisdb/store"
 )
+// DumpHead is the head of a dump.
 type DumpHead struct {
 	CommitID uint64
 }
+// Read reads meta from the Reader.
 func (h *DumpHead) Read(r io.Reader) error {
-	if err := binary.Read(r, binary.BigEndian, &h.CommitID); err != nil {
-		return err
-	}
-	return nil
+	return binary.Read(r, binary.BigEndian, &h.CommitID)
 }
+// Write writes meta to the Writer
 func (h *DumpHead) Write(w io.Writer) error {
-	if err := binary.Write(w, binary.BigEndian, h.CommitID); err != nil {
-		return err
-	}
-	return nil
+	return binary.Write(w, binary.BigEndian, h.CommitID)
 }
+// DumpFile dumps data to the file
 func (l *Ledis) DumpFile(path string) error {
 	f, err := os.Create(path)
 	if err != nil {
@@ -41,6 +37,7 @@ func (l *Ledis) DumpFile(path string) error {
 	return l.Dump(f)
 }
+// Dump dumps data to the Writer.
 func (l *Ledis) Dump(w io.Writer) error {
 	var err error
@@ -118,7 +115,7 @@ func (l *Ledis) Dump(w io.Writer) error {
 	return nil
 }
-// clear all data and load dump file to db
+// LoadDumpFile clears all data and loads dump file to db
 func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) {
 	f, err := os.Open(path)
 	if err != nil {
@@ -129,7 +126,7 @@ func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) {
 	return l.LoadDump(f)
 }
-// clear all data and load dump file to db
+// LoadDump clears all data and loads dump file to db
 func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) {
 	l.wLock.Lock()
 	defer l.wLock.Unlock()

View File

@@ -29,95 +29,105 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) {
 	switch k[1] {
 	case KVType:
-		if key, err := db.decodeKVKey(k); err != nil {
+		key, err := db.decodeKVKey(k)
+		if err != nil {
 			return nil, err
-		} else {
-			buf = strconv.AppendQuote(buf, hack.String(key))
 		}
+		buf = strconv.AppendQuote(buf, hack.String(key))
 	case HashType:
-		if key, field, err := db.hDecodeHashKey(k); err != nil {
+		key, field, err := db.hDecodeHashKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = strconv.AppendQuote(buf, hack.String(key))
 		buf = append(buf, ' ')
 		buf = strconv.AppendQuote(buf, hack.String(field))
-		}
 	case HSizeType:
-		if key, err := db.hDecodeSizeKey(k); err != nil {
+		key, err := db.hDecodeSizeKey(k)
+		if err != nil {
 			return nil, err
-		} else {
-			buf = strconv.AppendQuote(buf, hack.String(key))
 		}
+		buf = strconv.AppendQuote(buf, hack.String(key))
 	case ListType:
-		if key, seq, err := db.lDecodeListKey(k); err != nil {
+		key, seq, err := db.lDecodeListKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = strconv.AppendQuote(buf, hack.String(key))
 		buf = append(buf, ' ')
 		buf = strconv.AppendInt(buf, int64(seq), 10)
-		}
 	case LMetaType:
-		if key, err := db.lDecodeMetaKey(k); err != nil {
+		key, err := db.lDecodeMetaKey(k)
+		if err != nil {
 			return nil, err
-		} else {
-			buf = strconv.AppendQuote(buf, hack.String(key))
 		}
+		buf = strconv.AppendQuote(buf, hack.String(key))
 	case ZSetType:
-		if key, m, err := db.zDecodeSetKey(k); err != nil {
+		key, m, err := db.zDecodeSetKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = strconv.AppendQuote(buf, hack.String(key))
 		buf = append(buf, ' ')
 		buf = strconv.AppendQuote(buf, hack.String(m))
-		}
 	case ZSizeType:
-		if key, err := db.zDecodeSizeKey(k); err != nil {
+		key, err := db.zDecodeSizeKey(k)
+		if err != nil {
 			return nil, err
-		} else {
-			buf = strconv.AppendQuote(buf, hack.String(key))
 		}
+		buf = strconv.AppendQuote(buf, hack.String(key))
 	case ZScoreType:
-		if key, m, score, err := db.zDecodeScoreKey(k); err != nil {
+		key, m, score, err := db.zDecodeScoreKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = strconv.AppendQuote(buf, hack.String(key))
 		buf = append(buf, ' ')
 		buf = strconv.AppendQuote(buf, hack.String(m))
 		buf = append(buf, ' ')
 		buf = strconv.AppendInt(buf, score, 10)
-		}
 	case SetType:
-		if key, member, err := db.sDecodeSetKey(k); err != nil {
+		key, member, err := db.sDecodeSetKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = strconv.AppendQuote(buf, hack.String(key))
 		buf = append(buf, ' ')
 		buf = strconv.AppendQuote(buf, hack.String(member))
-		}
 	case SSizeType:
-		if key, err := db.sDecodeSizeKey(k); err != nil {
+		key, err := db.sDecodeSizeKey(k)
+		if err != nil {
 			return nil, err
-		} else {
-			buf = strconv.AppendQuote(buf, hack.String(key))
 		}
+		buf = strconv.AppendQuote(buf, hack.String(key))
 	case ExpTimeType:
-		if tp, key, t, err := db.expDecodeTimeKey(k); err != nil {
+		tp, key, t, err := db.expDecodeTimeKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = append(buf, TypeName[tp]...)
 		buf = append(buf, ' ')
 		buf = strconv.AppendQuote(buf, hack.String(key))
 		buf = append(buf, ' ')
 		buf = strconv.AppendInt(buf, t, 10)
-		}
 	case ExpMetaType:
-		if tp, key, err := db.expDecodeMetaKey(k); err != nil {
+		tp, key, err := db.expDecodeMetaKey(k)
+		if err != nil {
 			return nil, err
-		} else {
+		}
 		buf = append(buf, TypeName[tp]...)
 		buf = append(buf, ' ')
 		buf = strconv.AppendQuote(buf, hack.String(key))
-		}
 	default:
 		return nil, errInvalidEvent
 	}
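Most of the rewrites in this file apply the same golint advice: when an if block ends in a return, drop the else and keep the happy path unindented. A self-contained sketch of the before/after shape, with a hypothetical decodeKey standing in for the db.decode*Key helpers used above:

package lintdemo

import "strconv"

// decodeKey is a hypothetical stand-in for the db.decode*Key helpers.
func decodeKey(k []byte) ([]byte, error) { return k, nil }

// appendKey shows the early-return form the commit moves to; the old
// code returned inside an if/else, which golint flags with
// "if block ends with a return statement, so drop this else".
func appendKey(buf []byte, k []byte) ([]byte, error) {
	key, err := decodeKey(k)
	if err != nil {
		return nil, err
	}
	return strconv.AppendQuote(buf, string(key)), nil
}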

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/siddontang/ledisdb/store"
 )
+// Ledis is the core structure to handle the database.
 type Ledis struct {
 	cfg *config.Config
@@ -42,6 +43,7 @@ type Ledis struct {
 	ttlCheckerCh chan *ttlChecker
 }
+// Open opens the Ledis with a config.
 func Open(cfg *config.Config) (*Ledis, error) {
 	if len(cfg.DataDir) == 0 {
 		cfg.DataDir = config.DefaultDataDir
@@ -96,6 +98,7 @@ func Open(cfg *config.Config) (*Ledis, error) {
 	return l, nil
 }
+// Close closes the Ledis.
 func (l *Ledis) Close() {
 	close(l.quit)
 	l.wg.Wait()
@@ -113,6 +116,7 @@ func (l *Ledis) Close() {
 	}
 }
+// Select chooses a database.
 func (l *Ledis) Select(index int) (*DB, error) {
 	if index < 0 || index >= l.cfg.Databases {
 		return nil, fmt.Errorf("invalid db index %d, must in [0, %d]", index, l.cfg.Databases-1)
@@ -136,7 +140,7 @@ func (l *Ledis) Select(index int) (*DB, error) {
 	return db, nil
 }
-// Flush All will clear all data and replication logs
+// FlushAll will clear all data and replication logs
 func (l *Ledis) FlushAll() error {
 	l.wLock.Lock()
 	defer l.wLock.Unlock()
@@ -181,6 +185,7 @@ func (l *Ledis) flushAll() error {
 	return nil
 }
+// IsReadOnly returns whether Ledis is read only or not.
 func (l *Ledis) IsReadOnly() bool {
 	if l.cfg.GetReadonly() {
 		return true
@@ -229,10 +234,12 @@ func (l *Ledis) checkTTL() {
 	}
 }
+// StoreStat returns the statistics.
 func (l *Ledis) StoreStat() *store.Stat {
 	return l.ldb.Stat()
 }
+// CompactStore compacts the backend storage.
 func (l *Ledis) CompactStore() error {
 	l.wLock.Lock()
 	defer l.wLock.Unlock()

View File

@@ -26,6 +26,7 @@ type ibucket interface {
 	RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
 }
+// DB is the database.
 type DB struct {
 	l *Ledis
@@ -130,6 +131,7 @@ func (db *DB) newBatch() *batch {
 	return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock})
 }
+// Index gets the index of database.
 func (db *DB) Index() int {
 	return int(db.index)
 }
@@ -138,6 +140,7 @@ func (db *DB) Index() int {
 // 	return db.status == DBAutoCommit
 // }
+// FlushAll flushes the data.
 func (db *DB) FlushAll() (drop int64, err error) {
 	all := [...](func() (int64, error)){
 		db.flush,
@@ -147,12 +150,13 @@ func (db *DB) FlushAll() (drop int64, err error) {
 		db.sFlush}
 	for _, flush := range all {
-		if n, e := flush(); e != nil {
+		n, e := flush()
+		if e != nil {
 			err = e
 			return
-		} else {
-			drop += n
 		}
+		drop += n
 	}
 	return
@@ -195,9 +199,9 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
 		if err = t.Commit(); err != nil {
 			return
-		} else {
-			drop += int64(len(keys))
 		}
+		drop += int64(len(keys))
 		keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
 	}
 	return

View File

@@ -14,6 +14,7 @@ import (
 Only support rdb version 6.
 */
+// Dump dumps the KV value of key
 func (db *DB) Dump(key []byte) ([]byte, error) {
 	v, err := db.Get(key)
 	if err != nil {
@@ -25,6 +26,7 @@ func (db *DB) Dump(key []byte) ([]byte, error) {
 	return rdb.Dump(rdb.String(v))
 }
+// LDump dumps the list value of key
 func (db *DB) LDump(key []byte) ([]byte, error) {
 	v, err := db.LRange(key, 0, -1)
 	if err != nil {
@@ -36,6 +38,7 @@ func (db *DB) LDump(key []byte) ([]byte, error) {
 	return rdb.Dump(rdb.List(v))
 }
+// HDump dumps the hash value of key
 func (db *DB) HDump(key []byte) ([]byte, error) {
 	v, err := db.HGetAll(key)
 	if err != nil {
@@ -53,6 +56,7 @@ func (db *DB) HDump(key []byte) ([]byte, error) {
 	return rdb.Dump(o)
 }
+// SDump dumps the set value of key
 func (db *DB) SDump(key []byte) ([]byte, error) {
 	v, err := db.SMembers(key)
 	if err != nil {
@@ -64,6 +68,7 @@ func (db *DB) SDump(key []byte) ([]byte, error) {
 	return rdb.Dump(rdb.Set(v))
 }
+// ZDump dumps the zset value of key
 func (db *DB) ZDump(key []byte) ([]byte, error) {
 	v, err := db.ZRangeByScore(key, MinScore, MaxScore, 0, -1)
 	if err != nil {
@@ -81,6 +86,7 @@ func (db *DB) ZDump(key []byte) ([]byte, error) {
 	return rdb.Dump(o)
 }
+// Restore restores a key into database.
 func (db *DB) Restore(key []byte, ttl int64, data []byte) error {
 	d, err := rdb.DecodeDump(data)
 	if err != nil {

View File

@@ -16,10 +16,12 @@ const (
 	maxReplLogSize = 1 * 1024 * 1024
 )
+// For replication error.
 var (
 	ErrLogMissed = errors.New("log is pured in server")
 )
+// ReplicationUsed returns whether replication is used or not.
 func (l *Ledis) ReplicationUsed() bool {
 	return l.r != nil
 }
@@ -38,10 +40,11 @@ func (l *Ledis) handleReplication() error {
 			if err != rpl.ErrNoBehindLog {
 				log.Errorf("get next commit log err, %s", err.Error)
 				return err
-			} else {
+			}
 			return nil
 		}
-		} else {
 		l.rbatch.Rollback()
 		if rl.Compression == 1 {
@@ -71,8 +74,6 @@ func (l *Ledis) handleReplication() error {
 				return err
 			}
 		}
-		}
 }
 func (l *Ledis) onReplication() {
@@ -90,6 +91,7 @@ func (l *Ledis) onReplication() {
 	}
 }
+// WaitReplication waits replication done
 func (l *Ledis) WaitReplication() error {
 	if !l.ReplicationUsed() {
 		return ErrRplNotSupport
@@ -117,6 +119,7 @@ func (l *Ledis) WaitReplication() error {
 	return errors.New("wait replication too many times")
 }
+// StoreLogsFromReader stores logs from the Reader
 func (l *Ledis) StoreLogsFromReader(rb io.Reader) error {
 	if !l.ReplicationUsed() {
 		return ErrRplNotSupport
@@ -150,12 +153,14 @@ func (l *Ledis) noticeReplication() {
 	AsyncNotify(l.rc)
 }
+// StoreLogsFromData stores logs from data.
 func (l *Ledis) StoreLogsFromData(data []byte) error {
 	rb := bytes.NewReader(data)
 	return l.StoreLogsFromReader(rb)
 }
+// ReadLogsTo reads logs and write to the Writer.
 func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) {
 	if !l.ReplicationUsed() {
 		// no replication log
@@ -205,7 +210,8 @@ func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uin
 	return
 }
-// try to read events, if no events read, try to wait the new event singal until timeout seconds
+// ReadLogsToTimeout tries to read events, if no events read,
+// tres to wait the new event singal until timeout seconds
 func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w io.Writer, timeout int, quitCh chan struct{}) (n int, nextLogID uint64, err error) {
 	n, nextLogID, err = l.ReadLogsTo(startLogID, w)
 	if err != nil {
@@ -229,8 +235,10 @@ func (l *Ledis) propagate(rl *rpl.Log) {
 	}
 }
+// NewLogEventHandler is the handler to handle new log event.
 type NewLogEventHandler func(rl *rpl.Log)
+// AddNewLogEventHandler adds the handler for the new log event
 func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error {
 	if !l.ReplicationUsed() {
 		return ErrRplNotSupport
@@ -241,6 +249,7 @@ func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error {
 	return nil
 }
+// ReplicationStat returns the statistics of repliaciton.
 func (l *Ledis) ReplicationStat() (*rpl.Stat, error) {
 	if !l.ReplicationUsed() {
 		return nil, ErrRplNotSupport

View File

@@ -10,7 +10,7 @@ import (
 var errDataType = errors.New("error data type")
 var errMetaKey = errors.New("error meta key")
-//fif inclusive is true, scan range [cursor, inf) else (cursor, inf)
+//Scan scans the data. If inclusive is true, scan range [cursor, inf) else (cursor, inf)
 func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
 	storeDataType, err := getDataStoreType(dataType)
 	if err != nil {
@@ -20,7 +20,7 @@ func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool,
 	return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false)
 }
-//if inclusive is true, revscan range (-inf, cursor] else (inf, cursor)
+// RevScan scans the data reversed. if inclusive is true, revscan range (-inf, cursor] else (inf, cursor)
 func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
 	storeDataType, err := getDataStoreType(dataType)
 	if err != nil {
@@ -51,7 +51,7 @@ func getDataStoreType(dataType DataType) (byte, error) {
 func buildMatchRegexp(match string) (*regexp.Regexp, error) {
 	var err error
-	var r *regexp.Regexp = nil
+	var r *regexp.Regexp
 	if len(match) > 0 {
 		if r, err = regexp.Compile(match); err != nil {
@@ -300,10 +300,12 @@ func (db *DB) hScanGeneric(key []byte, cursor []byte, count int, inclusive bool,
 	return v, nil
 }
+// HScan scans data for hash.
 func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
 	return db.hScanGeneric(key, cursor, count, inclusive, match, false)
 }
+// HRevScan reversed scans data for hash.
 func (db *DB) HRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
 	return db.hScanGeneric(key, cursor, count, inclusive, match, true)
 }
@@ -341,10 +343,12 @@ func (db *DB) sScanGeneric(key []byte, cursor []byte, count int, inclusive bool,
 	return v, nil
 }
+// SScan scans data for set.
 func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
 	return db.sScanGeneric(key, cursor, count, inclusive, match, false)
 }
+// SRevScan scans data reversed for set.
 func (db *DB) SRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
 	return db.sScanGeneric(key, cursor, count, inclusive, match, true)
 }
@@ -387,10 +391,12 @@ func (db *DB) zScanGeneric(key []byte, cursor []byte, count int, inclusive bool,
 	return v, nil
 }
+// ZScan scans data for zset.
 func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
 	return db.zScanGeneric(key, cursor, count, inclusive, match, false)
 }
+// ZRevScan scans data reversed for zset.
 func (db *DB) ZRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
 	return db.zScanGeneric(key, cursor, count, inclusive, match, true)
 }
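The var r *regexp.Regexp = nil change above is another stock golint fix: explicit zero-value initializers (= nil, = 0) are redundant and golint asks for them to be dropped. An illustrative sketch of the same shape as buildMatchRegexp, simplified:

package lintdemo

import "regexp"

// buildMatcher mirrors the shape of buildMatchRegexp above; the body
// is simplified for illustration.
func buildMatcher(match string) (*regexp.Regexp, error) {
	// golint: "should drop = nil from declaration; it is the zero value"
	var r *regexp.Regexp
	var err error
	if len(match) > 0 {
		if r, err = regexp.Compile(match); err != nil {
			return nil, err
		}
	}
	return r, nil
}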

View File

@@ -9,6 +9,7 @@ import (
 	"github.com/siddontang/ledisdb/store"
 )
+// Limit is for sort.
 type Limit struct {
 	Offset int
 	Size int
@@ -52,7 +53,7 @@ func (db *DB) lookupKeyByPattern(pattern []byte, subKey []byte) []byte {
 	}
 	key := pattern
-	var field []byte = nil
+	var field []byte
 	// Find out if we're dealing with a hash dereference
 	if n := bytes.Index(pattern, hashPattern); n > 0 && n+3 < len(pattern) {
@@ -109,18 +110,16 @@ func (s *sortItemSlice) Less(i, j int) bool {
 		if s1.cmpValue == nil || s2.cmpValue == nil {
 			if s1.cmpValue == nil {
 				return true
-			} else {
+			}
 			return false
 		}
-		} else {
 		// Unlike redis, we only use bytes compare
 		return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0
 	}
-	} else {
 	// Unlike redis, we only use bytes compare
 	return bytes.Compare(s1.value, s2.value) < 0
 }
-}
 }
 func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
@@ -184,7 +183,7 @@ func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool
 	}
 	}
-	var resLen int = end - start + 1
+	resLen := end - start + 1
 	if len(sortGet) > 0 {
 		resLen = len(sortGet) * (end - start + 1)
 	}
@@ -204,6 +203,7 @@ func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool
 	return res, nil
 }
+// XLSort sorts list.
 func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
 	values, err := db.LRange(key, 0, -1)
@@ -214,6 +214,7 @@ func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, so
 	return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
 }
+// XSSort sorts set.
 func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
 	values, err := db.SMembers(key)
 	if err != nil {
@@ -223,6 +224,7 @@ func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, so
 	return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
 }
+// XZSort sorts zset.
 func (db *DB) XZSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
 	values, err := db.ZRangeByLex(key, nil, nil, store.RangeClose, 0, -1)
 	if err != nil {

View File

@@ -9,6 +9,7 @@ import (
 	"github.com/siddontang/ledisdb/store"
 )
+// FVPair is the pair of field and value.
 type FVPair struct {
 	Field []byte
 	Value []byte
@@ -154,7 +155,7 @@ func (db *DB) hDelete(t *batch, key []byte) int64 {
 	start := db.hEncodeStartKey(key)
 	stop := db.hEncodeStopKey(key)
-	var num int64 = 0
+	var num int64
 	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
 	for ; it.Valid(); it.Next() {
 		t.Delete(it.Key())
@@ -173,15 +174,17 @@ func (db *DB) hExpireAt(key []byte, when int64) (int64, error) {
 	if hlen, err := db.HLen(key); err != nil || hlen == 0 {
 		return 0, err
-	} else {
+	}
 	db.expireAt(t, HashType, key, when)
 	if err := t.Commit(); err != nil {
 		return 0, err
 	}
-	}
 	return 1, nil
 }
+// HLen returns the lengh of hash.
 func (db *DB) HLen(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -190,6 +193,7 @@ func (db *DB) HLen(key []byte) (int64, error) {
 	return Int64(db.bucket.Get(db.hEncodeSizeKey(key)))
 }
+// HSet sets the field with value of key.
 func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
 	if err := checkHashKFSize(key, field); err != nil {
 		return 0, err
@@ -210,6 +214,7 @@ func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
 	return n, err
 }
+// HGet gets the value of the field.
 func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
 	if err := checkHashKFSize(key, field); err != nil {
 		return nil, err
@@ -218,6 +223,7 @@ func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
 	return db.bucket.Get(db.hEncodeHashKey(key, field))
 }
+// HMset sets multi field-values.
 func (db *DB) HMset(key []byte, args ...FVPair) error {
 	t := db.hashBatch
 	t.Lock()
@@ -225,7 +231,7 @@ func (db *DB) HMset(key []byte, args ...FVPair) error {
 	var err error
 	var ek []byte
-	var num int64 = 0
+	var num int64
 	for i := 0; i < len(args); i++ {
 		if err := checkHashKFSize(key, args[i].Field); err != nil {
 			return err
@@ -253,6 +259,7 @@ func (db *DB) HMset(key []byte, args ...FVPair) error {
 	return err
 }
+// HMget gets multi values of fields
 func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
 	var ek []byte
@@ -273,6 +280,7 @@ func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
 	return r, nil
 }
+// HDel deletes the fields.
 func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
 	t := db.hashBatch
@@ -286,7 +294,7 @@ func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
 	it := db.bucket.NewIterator()
 	defer it.Close()
-	var num int64 = 0
+	var num int64
 	for i := 0; i < len(args); i++ {
 		if err := checkHashKFSize(key, args[i]); err != nil {
 			return 0, err
@@ -317,10 +325,11 @@ func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
 	sk := db.hEncodeSizeKey(key)
 	var err error
-	var size int64 = 0
+	var size int64
 	if size, err = Int64(db.bucket.Get(sk)); err != nil {
 		return 0, err
-	} else {
+	}
 	size += delta
 	if size <= 0 {
 		size = 0
@@ -329,11 +338,11 @@ func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
 	} else {
 		t.Put(sk, PutInt64(size))
 	}
-	}
 	return size, nil
 }
+// HIncrBy increases the value of field by delta.
 func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
 	if err := checkHashKFSize(key, field); err != nil {
 		return 0, err
@@ -348,7 +357,7 @@ func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
 	ek = db.hEncodeHashKey(key, field)
-	var n int64 = 0
+	var n int64
 	if n, err = StrInt64(db.bucket.Get(ek)); err != nil {
 		return 0, err
 	}
@@ -365,6 +374,7 @@ func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
 	return n, err
 }
+// HGetAll returns all field-values.
 func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -390,6 +400,7 @@ func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
 	return v, nil
 }
+// HKeys returns the all fields.
 func (db *DB) HKeys(key []byte) ([][]byte, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -414,6 +425,7 @@ func (db *DB) HKeys(key []byte) ([][]byte, error) {
 	return v, nil
 }
+// HValues returns all values
 func (db *DB) HValues(key []byte) ([][]byte, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -439,6 +451,7 @@ func (db *DB) HValues(key []byte) ([][]byte, error) {
 	return v, nil
 }
+// HClear clears the data.
 func (db *DB) HClear(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -455,6 +468,7 @@ func (db *DB) HClear(key []byte) (int64, error) {
 	return num, err
 }
+// HMclear cleans multi data.
 func (db *DB) HMclear(keys ...[]byte) (int64, error) {
 	t := db.hashBatch
 	t.Lock()
@@ -482,6 +496,7 @@ func (db *DB) hFlush() (drop int64, err error) {
 	return db.flushType(t, HashType)
 }
+// HExpire expires the data with duration.
 func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
 	if duration <= 0 {
 		return 0, errExpireValue
@@ -490,6 +505,7 @@ func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
 	return db.hExpireAt(key, time.Now().Unix()+duration)
 }
+// HExpireAt expires the data at time when.
 func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
 	if when <= time.Now().Unix() {
 		return 0, errExpireValue
@@ -498,6 +514,7 @@ func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
 	return db.hExpireAt(key, when)
 }
+// HTTL gets the TTL of data.
 func (db *DB) HTTL(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return -1, err
@@ -506,6 +523,7 @@ func (db *DB) HTTL(key []byte) (int64, error) {
 	return db.ttl(HashType, key)
 }
+// HPersist removes the TTL of data.
 func (db *DB) HPersist(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -524,6 +542,7 @@ func (db *DB) HPersist(key []byte) (int64, error) {
 	return n, err
 }
+// HKeyExists checks whether data exists or not.
 func (db *DB) HKeyExists(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/siddontang/ledisdb/store"
 )
+// KVPair is the pair of key-value.
 type KVPair struct {
 	Key []byte
 	Value []byte
@@ -109,23 +110,27 @@ func (db *DB) setExpireAt(key []byte, when int64) (int64, error) {
 	if exist, err := db.Exists(key); err != nil || exist == 0 {
 		return 0, err
-	} else {
+	}
 	db.expireAt(t, KVType, key, when)
 	if err := t.Commit(); err != nil {
 		return 0, err
 	}
-	}
 	return 1, nil
 }
+// Decr decreases the data.
 func (db *DB) Decr(key []byte) (int64, error) {
 	return db.incr(key, -1)
 }
+// DecrBy decreases the data by decrement.
 func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) {
 	return db.incr(key, -decrement)
 }
+// Del deletes the data.
 func (db *DB) Del(keys ...[]byte) (int64, error) {
 	if len(keys) == 0 {
 		return 0, nil
@@ -149,6 +154,7 @@ func (db *DB) Del(keys ...[]byte) (int64, error) {
 	return int64(len(keys)), err
 }
+// Exists check data exists or not.
 func (db *DB) Exists(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -166,6 +172,7 @@ func (db *DB) Exists(key []byte) (int64, error) {
 	return 0, err
 }
+// Get gets the value.
 func (db *DB) Get(key []byte) ([]byte, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -176,6 +183,7 @@ func (db *DB) Get(key []byte) ([]byte, error) {
 	return db.bucket.Get(key)
 }
+// GetSlice gets the slice of the data.
 func (db *DB) GetSlice(key []byte) (store.Slice, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -186,6 +194,7 @@ func (db *DB) GetSlice(key []byte) (store.Slice, error) {
 	return db.bucket.GetSlice(key)
 }
+// GetSet gets the value and sets new value.
 func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -212,14 +221,17 @@ func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) {
 	return oldValue, err
 }
+// Incr increases the data.
 func (db *DB) Incr(key []byte) (int64, error) {
 	return db.incr(key, 1)
 }
+// IncrBy increases the data by increment.
 func (db *DB) IncrBy(key []byte, increment int64) (int64, error) {
 	return db.incr(key, increment)
 }
+// MGet gets multi data.
 func (db *DB) MGet(keys ...[]byte) ([][]byte, error) {
 	values := make([][]byte, len(keys))
@@ -237,6 +249,7 @@ func (db *DB) MGet(keys ...[]byte) ([][]byte, error) {
 	return values, nil
 }
+// MSet sets multi data.
 func (db *DB) MSet(args ...KVPair) error {
 	if len(args) == 0 {
 		return nil
@@ -270,6 +283,7 @@ func (db *DB) MSet(args ...KVPair) error {
 	return err
 }
+// Set sets the data.
 func (db *DB) Set(key []byte, value []byte) error {
 	if err := checkKeySize(key); err != nil {
 		return err
@@ -292,6 +306,7 @@ func (db *DB) Set(key []byte, value []byte) error {
 	return err
 }
+// SetNX sets the data if not existed.
 func (db *DB) SetNX(key []byte, value []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -322,6 +337,7 @@ func (db *DB) SetNX(key []byte, value []byte) (int64, error) {
 	return n, err
 }
+// SetEX sets the data with a TTL.
 func (db *DB) SetEX(key []byte, duration int64, value []byte) error {
 	if err := checkKeySize(key); err != nil {
 		return err
@@ -341,11 +357,7 @@ func (db *DB) SetEX(key []byte, duration int64, value []byte) error {
 	t.Put(ek, value)
 	db.expireAt(t, KVType, key, time.Now().Unix()+duration)
-	if err := t.Commit(); err != nil {
-		return err
-	}
-	return nil
+	return t.Commit()
 }
 func (db *DB) flush() (drop int64, err error) {
@@ -355,6 +367,7 @@ func (db *DB) flush() (drop int64, err error) {
 	return db.flushType(t, KVType)
 }
+// Expire expires the data.
 func (db *DB) Expire(key []byte, duration int64) (int64, error) {
 	if duration <= 0 {
 		return 0, errExpireValue
@@ -363,6 +376,7 @@ func (db *DB) Expire(key []byte, duration int64) (int64, error) {
 	return db.setExpireAt(key, time.Now().Unix()+duration)
 }
+// ExpireAt expires the data at when.
 func (db *DB) ExpireAt(key []byte, when int64) (int64, error) {
 	if when <= time.Now().Unix() {
 		return 0, errExpireValue
@@ -371,6 +385,7 @@ func (db *DB) ExpireAt(key []byte, when int64) (int64, error) {
 	return db.setExpireAt(key, when)
 }
+// TTL returns the TTL of the data.
 func (db *DB) TTL(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return -1, err
@@ -379,6 +394,7 @@ func (db *DB) TTL(key []byte) (int64, error) {
 	return db.ttl(KVType, key)
 }
+// Persist removes the TTL of the data.
 func (db *DB) Persist(key []byte) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -396,6 +412,7 @@ func (db *DB) Persist(key []byte) (int64, error) {
 	return n, err
 }
+// SetRange sets the data with new value from offset.
 func (db *DB) SetRange(key []byte, offset int, value []byte) (int64, error) {
 	if len(value) == 0 {
 		return 0, nil
@@ -458,6 +475,7 @@ func getRange(start int, end int, valLen int) (int, int) {
 	return start, end
 }
+// GetRange gets the range of the data.
 func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) {
 	if err := checkKeySize(key); err != nil {
 		return nil, err
@@ -480,6 +498,7 @@ func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) {
 	return value[start : end+1], nil
 }
+// StrLen returns the length of the data.
 func (db *DB) StrLen(key []byte) (int64, error) {
 	s, err := db.GetSlice(key)
 	if err != nil {
@@ -491,6 +510,7 @@ func (db *DB) StrLen(key []byte) (int64, error) {
 	return int64(n), nil
 }
+// Append appends the value to the data.
 func (db *DB) Append(key []byte, value []byte) (int64, error) {
 	if len(value) == 0 {
 		return 0, nil
@@ -526,6 +546,7 @@ func (db *DB) Append(key []byte, value []byte) (int64, error) {
 	return int64(len(oldValue)), nil
 }
+// BitOP does the bit operations in data.
 func (db *DB) BitOP(op string, destKey []byte, srcKeys ...[]byte) (int64, error) {
 	if err := checkKeySize(destKey); err != nil {
 		return 0, err
@@ -628,6 +649,7 @@ func numberBitCount(i uint32) uint32 {
 	return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24
 }
+// BitCount returns the bit count of data.
 func (db *DB) BitCount(key []byte, start int, end int) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -642,7 +664,7 @@ func (db *DB) BitCount(key []byte, start int, end int) (int64, error) {
 	start, end = getRange(start, end, len(value))
 	value = value[start : end+1]
-	var n int64 = 0
+	var n int64
 	pos := 0
 	for ; pos+4 <= len(value); pos = pos + 4 {
@@ -656,6 +678,7 @@ func (db *DB) BitCount(key []byte, start int, end int) (int64, error) {
 	return n, nil
 }
+// BitPos returns the pos of the data.
 func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -665,7 +688,7 @@ func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) {
 		return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
 	}
-	var skipValue uint8 = 0
+	var skipValue uint8
 	if on == 0 {
 		skipValue = 0xFF
 	}
@@ -694,6 +717,7 @@ func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) {
 	return -1, nil
 }
+// SetBit sets the bit to the data.
 func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -736,11 +760,12 @@ func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) {
 	if bitVal > 0 {
 		return 1, nil
-	} else {
-		return 0, nil
 	}
+	return 0, nil
 }
+// GetBit gets the bit of data at offset.
 func (db *DB) GetBit(key []byte, offset int) (int64, error) {
 	if err := checkKeySize(key); err != nil {
 		return 0, err
@@ -763,7 +788,7 @@ func (db *DB) GetBit(key []byte, offset int) (int64, error) {
 	bitVal := value[byteOffset] & (1 << bit)
 	if bitVal > 0 {
 		return 1, nil
-	} else {
-		return 0, nil
 	}
+	return 0, nil
 }

View File

@ -121,12 +121,12 @@ func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) {
return 0, err return 0, err
} }
var pushCnt int = len(args) pushCnt := len(args)
if pushCnt == 0 { if pushCnt == 0 {
return int64(size), nil return int64(size), nil
} }
var seq int32 = headSeq seq := headSeq
var delta int32 = -1 var delta int32 = -1
if whereSeq == listTailSeq { if whereSeq == listTailSeq {
seq = tailSeq seq = tailSeq
@ -190,7 +190,7 @@ func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) {
var value []byte var value []byte
var seq int32 = headSeq seq := headSeq
if whereSeq == listTailSeq { if whereSeq == listTailSeq {
seq = tailSeq seq = tailSeq
} }
@ -202,9 +202,9 @@ func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) {
} }
if whereSeq == listHeadSeq { if whereSeq == listHeadSeq {
headSeq += 1 headSeq++
} else { } else {
tailSeq -= 1 tailSeq--
} }
t.Delete(itemKey) t.Delete(itemKey)
@ -234,7 +234,8 @@ func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) {
ek := db.lEncodeMetaKey(key) ek := db.lEncodeMetaKey(key)
if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil { if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil {
return err return err
} else { }
if start < 0 { if start < 0 {
start = llen + start start = llen + start
} }
@ -253,7 +254,6 @@ func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) {
if stop >= llen { if stop >= llen {
stop = llen - 1 stop = llen - 1
} }
}
if start > 0 { if start > 0 {
for i := int32(0); i < start; i++ { for i := int32(0); i < start; i++ {
@ -343,11 +343,14 @@ func (db *DB) lDelete(t *batch, key []byte) int64 {
return 0 return 0
} }
var num int64 = 0 var num int64
startKey := db.lEncodeListKey(key, headSeq) startKey := db.lEncodeListKey(key, headSeq)
stopKey := db.lEncodeListKey(key, tailSeq) stopKey := db.lEncodeListKey(key, tailSeq)
rit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose}) rit := store.NewRangeIterator(it, &store.Range{
Min: startKey,
Max: stopKey,
Type: store.RangeClose})
for ; rit.Valid(); rit.Next() { for ; rit.Valid(); rit.Next() {
t.Delete(rit.RawKey()) t.Delete(rit.RawKey())
num++ num++
@ -383,7 +386,7 @@ func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq in
func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 { func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 {
t := db.listBatch t := db.listBatch
var size int32 = tailSeq - headSeq + 1 size := tailSeq - headSeq + 1
if size < 0 { if size < 0 {
// todo : log error + panic // todo : log error + panic
log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq) log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq)
@ -408,15 +411,17 @@ func (db *DB) lExpireAt(key []byte, when int64) (int64, error) {
if llen, err := db.LLen(key); err != nil || llen == 0 { if llen, err := db.LLen(key); err != nil || llen == 0 {
return 0, err return 0, err
} else { }
db.expireAt(t, ListType, key, when) db.expireAt(t, ListType, key, when)
if err := t.Commit(); err != nil { if err := t.Commit(); err != nil {
return 0, err return 0, err
} }
}
return 1, nil return 1, nil
} }
// LIndex returns the value at index.
func (db *DB) LIndex(key []byte, index int32) ([]byte, error) { func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return nil, err return nil, err
@ -449,6 +454,7 @@ func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
return v, nil return v, nil
} }
// LLen gets the length of the list.
func (db *DB) LLen(key []byte) (int64, error) { func (db *DB) LLen(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -459,25 +465,32 @@ func (db *DB) LLen(key []byte) (int64, error) {
return int64(size), err return int64(size), err
} }
// LPop pops the value.
func (db *DB) LPop(key []byte) ([]byte, error) { func (db *DB) LPop(key []byte) ([]byte, error) {
return db.lpop(key, listHeadSeq) return db.lpop(key, listHeadSeq)
} }
// LTrim trims the value from start to stop.
func (db *DB) LTrim(key []byte, start, stop int64) error { func (db *DB) LTrim(key []byte, start, stop int64) error {
return db.ltrim2(key, start, stop) return db.ltrim2(key, start, stop)
} }
// LTrimFront trims the value from top.
func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) { func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) {
return db.ltrim(key, trimSize, listHeadSeq) return db.ltrim(key, trimSize, listHeadSeq)
} }
// LTrimBack trims the value from back.
func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) { func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) {
return db.ltrim(key, trimSize, listTailSeq) return db.ltrim(key, trimSize, listTailSeq)
} }
// LPush push the value to the list.
func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) { func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) {
return db.lpush(key, listHeadSeq, args...) return db.lpush(key, listHeadSeq, args...)
} }
// LSet sets the value at index.
func (db *DB) LSet(key []byte, index int32, value []byte) error { func (db *DB) LSet(key []byte, index int32, value []byte) error {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return err return err
@ -512,6 +525,7 @@ func (db *DB) LSet(key []byte, index int32, value []byte) error {
return err return err
} }
// LRange gets the value of list at range.
func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) { func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return nil, err return nil, err
@ -570,14 +584,17 @@ func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
return v, nil return v, nil
} }
// RPop rpops the value.
func (db *DB) RPop(key []byte) ([]byte, error) { func (db *DB) RPop(key []byte) ([]byte, error) {
return db.lpop(key, listTailSeq) return db.lpop(key, listTailSeq)
} }
// RPush rpushs the value .
func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) { func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) {
return db.lpush(key, listTailSeq, args...) return db.lpush(key, listTailSeq, args...)
} }
// LClear clears the list.
func (db *DB) LClear(key []byte) (int64, error) { func (db *DB) LClear(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -594,6 +611,7 @@ func (db *DB) LClear(key []byte) (int64, error) {
return num, err return num, err
} }
// LMclear clears multi lists.
func (db *DB) LMclear(keys ...[]byte) (int64, error) { func (db *DB) LMclear(keys ...[]byte) (int64, error) {
t := db.listBatch t := db.listBatch
t.Lock() t.Lock()
@ -620,6 +638,7 @@ func (db *DB) lFlush() (drop int64, err error) {
return db.flushType(t, ListType) return db.flushType(t, ListType)
} }
// LExpire expires the list.
func (db *DB) LExpire(key []byte, duration int64) (int64, error) { func (db *DB) LExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 { if duration <= 0 {
return 0, errExpireValue return 0, errExpireValue
@ -628,6 +647,7 @@ func (db *DB) LExpire(key []byte, duration int64) (int64, error) {
return db.lExpireAt(key, time.Now().Unix()+duration) return db.lExpireAt(key, time.Now().Unix()+duration)
} }
// LExpireAt expires the list at when.
func (db *DB) LExpireAt(key []byte, when int64) (int64, error) { func (db *DB) LExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() { if when <= time.Now().Unix() {
return 0, errExpireValue return 0, errExpireValue
@ -636,6 +656,7 @@ func (db *DB) LExpireAt(key []byte, when int64) (int64, error) {
return db.lExpireAt(key, when) return db.lExpireAt(key, when)
} }
// LTTL gets the TTL of the list.
func (db *DB) LTTL(key []byte) (int64, error) { func (db *DB) LTTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return -1, err return -1, err
@ -644,6 +665,7 @@ func (db *DB) LTTL(key []byte) (int64, error) {
return db.ttl(ListType, key) return db.ttl(ListType, key)
} }
// LPersist removes the TTL of the list.
func (db *DB) LPersist(key []byte) (int64, error) { func (db *DB) LPersist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -672,14 +694,17 @@ func (db *DB) lEncodeMaxKey() []byte {
return ek return ek
} }
// BLPop pops from the head of the lists, blocking for up to timeout.
func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) {
return db.lblockPop(keys, listHeadSeq, timeout) return db.lblockPop(keys, listHeadSeq, timeout)
} }
// BRPop pops from the tail of the lists, blocking for up to timeout.
func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) {
return db.lblockPop(keys, listTailSeq, timeout) return db.lblockPop(keys, listTailSeq, timeout)
} }
// LKeyExists checks whether the list exists.
func (db *DB) LKeyExists(key []byte) (int64, error) { func (db *DB) LKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
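A minimal usage sketch of the list API documented above. It assumes the usual ledis.Open/Select entry points and a default config; the data directory value and error handling are illustrative only, not part of this commit.

package main

import (
	"fmt"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/ledis"
)

func main() {
	// Assumed entry points: config.NewConfigDefault and ledis.Open/Select.
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/ledis_list_example" // hypothetical path

	l, err := ledis.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	db, _ := l.Select(0)

	key := []byte("mylist")
	// LPush pushes onto the head, RPush onto the tail.
	db.RPush(key, []byte("a"), []byte("b"))
	db.LPush(key, []byte("front"))

	// LRange returns the elements in [start, stop]; -1 addresses the last element.
	vals, _ := db.LRange(key, 0, -1)
	for _, v := range vals {
		fmt.Println(string(v))
	}

	// LPop removes from the head, RPop from the tail.
	head, _ := db.LPop(key)
	fmt.Println("popped:", string(head))
}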

View File

@ -12,6 +12,7 @@ import (
var errSetKey = errors.New("invalid set key") var errSetKey = errors.New("invalid set key")
var errSSizeKey = errors.New("invalid ssize key") var errSSizeKey = errors.New("invalid ssize key")
// For set operation type.
const ( const (
setStartSep byte = ':' setStartSep byte = ':'
setStopSep byte = setStartSep + 1 setStopSep byte = setStartSep + 1
@ -137,7 +138,7 @@ func (db *DB) sDelete(t *batch, key []byte) int64 {
start := db.sEncodeStartKey(key) start := db.sEncodeStartKey(key)
stop := db.sEncodeStopKey(key) stop := db.sEncodeStopKey(key)
var num int64 = 0 var num int64
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
for ; it.Valid(); it.Next() { for ; it.Valid(); it.Next() {
t.Delete(it.RawKey()) t.Delete(it.RawKey())
@ -155,10 +156,11 @@ func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) {
sk := db.sEncodeSizeKey(key) sk := db.sEncodeSizeKey(key)
var err error var err error
var size int64 = 0 var size int64
if size, err = Int64(db.bucket.Get(sk)); err != nil { if size, err = Int64(db.bucket.Get(sk)); err != nil {
return 0, err return 0, err
} else { }
size += delta size += delta
if size <= 0 { if size <= 0 {
size = 0 size = 0
@ -167,7 +169,6 @@ func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) {
} else { } else {
t.Put(sk, PutInt64(size)) t.Put(sk, PutInt64(size))
} }
}
return size, nil return size, nil
} }
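The `var size int64 = 0` to `var size int64` change in this hunk is a standard golint fix: Go already zero-initializes declared variables, so the explicit initializer is redundant. A small self-contained sketch of the same shape, mirroring the `var num int64` counters used throughout this commit (illustrative code, not from this repository):

package example

// countPositive relies on the zero value: `var num int64` already starts at 0,
// so golint flags `var num int64 = 0` as a redundant initializer.
func countPositive(vals []int64) int64 {
	var num int64 // == 0
	for _, v := range vals {
		if v > 0 {
			num++
		}
	}
	return num
}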
@ -179,14 +180,12 @@ func (db *DB) sExpireAt(key []byte, when int64) (int64, error) {
if scnt, err := db.SCard(key); err != nil || scnt == 0 { if scnt, err := db.SCard(key); err != nil || scnt == 0 {
return 0, err return 0, err
} else { }
db.expireAt(t, SetType, key, when) db.expireAt(t, SetType, key, when)
if err := t.Commit(); err != nil { if err := t.Commit(); err != nil {
return 0, err return 0, err
} }
}
return 1, nil return 1, nil
} }
@ -207,6 +206,7 @@ func (db *DB) sSetItem(key []byte, member []byte) (int64, error) {
return n, nil return n, nil
} }
// SAdd adds the members to the set.
func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) {
t := db.setBatch t := db.setBatch
t.Lock() t.Lock()
@ -214,7 +214,7 @@ func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) {
var err error var err error
var ek []byte var ek []byte
var num int64 = 0 var num int64
for i := 0; i < len(args); i++ { for i := 0; i < len(args); i++ {
if err := checkSetKMSize(key, args[i]); err != nil { if err := checkSetKMSize(key, args[i]); err != nil {
return 0, err return 0, err
@ -240,6 +240,7 @@ func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) {
} }
// SCard gets the size of the set.
func (db *DB) SCard(key []byte) (int64, error) { func (db *DB) SCard(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -294,16 +295,19 @@ func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) {
return slice, nil return slice, nil
} }
// SDiff returns the difference of the sets.
func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) { func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) {
v, err := db.sDiffGeneric(keys...) v, err := db.sDiffGeneric(keys...)
return v, err return v, err
} }
// SDiffStore computes the difference of the sets and stores it in the destination set.
func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) { func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) {
n, err := db.sStoreGeneric(dstKey, DiffType, keys...) n, err := db.sStoreGeneric(dstKey, DiffType, keys...)
return n, err return n, err
} }
// SKeyExists checks whether the set exists.
func (db *DB) SKeyExists(key []byte) (int64, error) { func (db *DB) SKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -370,17 +374,20 @@ func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) {
} }
// SInter intersects the sets.
func (db *DB) SInter(keys ...[]byte) ([][]byte, error) { func (db *DB) SInter(keys ...[]byte) ([][]byte, error) {
v, err := db.sInterGeneric(keys...) v, err := db.sInterGeneric(keys...)
return v, err return v, err
} }
// SInterStore intersects the sets and stores the result in the destination set.
func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) { func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) {
n, err := db.sStoreGeneric(dstKey, InterType, keys...) n, err := db.sStoreGeneric(dstKey, InterType, keys...)
return n, err return n, err
} }
// SIsMember checks whether member is in the set.
func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { func (db *DB) SIsMember(key []byte, member []byte) (int64, error) {
ek := db.sEncodeSetKey(key, member) ek := db.sEncodeSetKey(key, member)
@ -393,6 +400,7 @@ func (db *DB) SIsMember(key []byte, member []byte) (int64, error) {
return n, nil return n, nil
} }
// SMembers gets the members of the set.
func (db *DB) SMembers(key []byte) ([][]byte, error) { func (db *DB) SMembers(key []byte) ([][]byte, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return nil, err return nil, err
@ -418,6 +426,7 @@ func (db *DB) SMembers(key []byte) ([][]byte, error) {
return v, nil return v, nil
} }
// SRem removes the members from the set.
func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) {
t := db.setBatch t := db.setBatch
t.Lock() t.Lock()
@ -430,7 +439,7 @@ func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) {
it := db.bucket.NewIterator() it := db.bucket.NewIterator()
defer it.Close() defer it.Close()
var num int64 = 0 var num int64
for i := 0; i < len(args); i++ { for i := 0; i < len(args); i++ {
if err := checkSetKMSize(key, args[i]); err != nil { if err := checkSetKMSize(key, args[i]); err != nil {
return 0, err return 0, err
@ -487,11 +496,13 @@ func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) {
return slice, nil return slice, nil
} }
// SUnion unions the sets.
func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) { func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) {
v, err := db.sUnionGeneric(keys...) v, err := db.sUnionGeneric(keys...)
return v, err return v, err
} }
// SUnionStore unions the sets and stores the result in the destination set.
func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) { func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) {
n, err := db.sStoreGeneric(dstKey, UnionType, keys...) n, err := db.sStoreGeneric(dstKey, UnionType, keys...)
return n, err return n, err
@ -549,6 +560,7 @@ func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64,
return n, nil return n, nil
} }
// SClear clears the set.
func (db *DB) SClear(key []byte) (int64, error) { func (db *DB) SClear(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -565,6 +577,7 @@ func (db *DB) SClear(key []byte) (int64, error) {
return num, err return num, err
} }
// SMclear clears multiple sets.
func (db *DB) SMclear(keys ...[]byte) (int64, error) { func (db *DB) SMclear(keys ...[]byte) (int64, error) {
t := db.setBatch t := db.setBatch
t.Lock() t.Lock()
@ -583,6 +596,7 @@ func (db *DB) SMclear(keys ...[]byte) (int64, error) {
return int64(len(keys)), err return int64(len(keys)), err
} }
// SExpire expires the set.
func (db *DB) SExpire(key []byte, duration int64) (int64, error) { func (db *DB) SExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 { if duration <= 0 {
return 0, errExpireValue return 0, errExpireValue
@ -592,6 +606,7 @@ func (db *DB) SExpire(key []byte, duration int64) (int64, error) {
} }
// SExpireAt expires the set at when.
func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { func (db *DB) SExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() { if when <= time.Now().Unix() {
return 0, errExpireValue return 0, errExpireValue
@ -601,6 +616,7 @@ func (db *DB) SExpireAt(key []byte, when int64) (int64, error) {
} }
// STTL gets the TTL of the set.
func (db *DB) STTL(key []byte) (int64, error) { func (db *DB) STTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return -1, err return -1, err
@ -609,6 +625,7 @@ func (db *DB) STTL(key []byte) (int64, error) {
return db.ttl(SetType, key) return db.ttl(SetType, key)
} }
// SPersist removes the TTL of the set.
func (db *DB) SPersist(key []byte) (int64, error) { func (db *DB) SPersist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
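A short usage sketch of the set API documented in this file, assuming db is a *ledis.DB obtained as in the earlier list example; key names are made up for illustration.

package example

import (
	"fmt"

	"github.com/siddontang/ledisdb/ledis"
)

func setExample(db *ledis.DB) error {
	a, b := []byte("set:a"), []byte("set:b")

	// SAdd returns how many members were newly added.
	if _, err := db.SAdd(a, []byte("x"), []byte("y")); err != nil {
		return err
	}
	if _, err := db.SAdd(b, []byte("y"), []byte("z")); err != nil {
		return err
	}

	// SInter/SDiff/SUnion accept any number of keys.
	inter, err := db.SInter(a, b)
	if err != nil {
		return err
	}
	for _, m := range inter {
		fmt.Println("common member:", string(m))
	}

	// SDiffStore writes the difference of a and b into a destination set
	// and returns its cardinality.
	n, err := db.SDiffStore([]byte("set:diff"), a, b)
	fmt.Println("diff size:", n)
	return err
}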

View File

@ -119,18 +119,22 @@ func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) {
func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) { func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) {
mk := db.expEncodeMetaKey(dataType, key) mk := db.expEncodeMetaKey(dataType, key)
if v, err := db.bucket.Get(mk); err != nil { v, err := db.bucket.Get(mk)
if err != nil {
return 0, err return 0, err
} else if v == nil { } else if v == nil {
return 0, nil return 0, nil
} else if when, err2 := Int64(v, nil); err2 != nil { }
when, err2 := Int64(v, nil)
if err2 != nil {
return 0, err2 return 0, err2
} else { }
tk := db.expEncodeTimeKey(dataType, key, when) tk := db.expEncodeTimeKey(dataType, key, when)
t.Delete(mk) t.Delete(mk)
t.Delete(tk) t.Delete(tk)
return 1, nil return 1, nil
}
} }
func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) { func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) {
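The rmExpire rewrite above follows the lint advice to hoist the error check and drop else branches that follow a return, which flattens the nesting without changing behavior. A generic sketch of the same transformation, using hypothetical lookup functions rather than this repository's code:

package example

import "errors"

var errMissing = errors.New("missing")

// Nested form golint complains about: every else adds a level of indentation.
func lookupNested(get func(string) (string, error), key string) (string, error) {
	if v, err := get(key); err != nil {
		return "", err
	} else if v == "" {
		return "", errMissing
	} else {
		return v, nil
	}
}

// Flattened form used throughout this commit: check errors first and return early.
func lookupFlat(get func(string) (string, error), key string) (string, error) {
	v, err := get(key)
	if err != nil {
		return "", err
	}
	if v == "" {
		return "", errMissing
	}
	return v, nil
}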

View File

@ -55,19 +55,19 @@ func listAdaptor(db *DB) *adaptor {
eles = append(eles, e) eles = append(eles, e)
} }
if n, err := db.LPush(k, eles...); err != nil { n, err := db.LPush(k, eles...)
if err != nil {
return 0, err return 0, err
} else {
return n, nil
} }
return n, nil
} }
adp.exists = func(k []byte) (int64, error) { adp.exists = func(k []byte) (int64, error) {
if llen, err := db.LLen(k); err != nil || llen <= 0 { if llen, err := db.LLen(k); err != nil || llen <= 0 {
return 0, err return 0, err
} else {
return 1, nil
} }
return 1, nil
} }
adp.del = db.LClear adp.del = db.LClear
@ -97,17 +97,15 @@ func hashAdaptor(db *DB) *adaptor {
if err := db.HMset(k, datas...); err != nil { if err := db.HMset(k, datas...); err != nil {
return 0, err return 0, err
} else {
return int64(len(datas)), nil
} }
return int64(len(datas)), nil
} }
adp.exists = func(k []byte) (int64, error) { adp.exists = func(k []byte) (int64, error) {
if hlen, err := db.HLen(k); err != nil || hlen <= 0 { if hlen, err := db.HLen(k); err != nil || hlen <= 0 {
return 0, err return 0, err
} else {
return 1, nil
} }
return 1, nil
} }
adp.del = db.HClear adp.del = db.HClear
@ -135,19 +133,18 @@ func zsetAdaptor(db *DB) *adaptor {
datas = append(datas, pair) datas = append(datas, pair)
} }
if n, err := db.ZAdd(k, datas...); err != nil { n, err := db.ZAdd(k, datas...)
if err != nil {
return 0, err return 0, err
} else {
return n, nil
} }
return n, nil
} }
adp.exists = func(k []byte) (int64, error) { adp.exists = func(k []byte) (int64, error) {
if cnt, err := db.ZCard(k); err != nil || cnt <= 0 { if cnt, err := db.ZCard(k); err != nil || cnt <= 0 {
return 0, err return 0, err
} else {
return 1, nil
} }
return 1, nil
} }
adp.del = db.ZClear adp.del = db.ZClear
@ -171,20 +168,19 @@ func setAdaptor(db *DB) *adaptor {
eles = append(eles, e) eles = append(eles, e)
} }
if n, err := db.SAdd(k, eles...); err != nil { n, err := db.SAdd(k, eles...)
if err != nil {
return 0, err return 0, err
} else {
return n, nil
} }
return n, nil
} }
adp.exists = func(k []byte) (int64, error) { adp.exists = func(k []byte) (int64, error) {
if slen, err := db.SCard(k); err != nil || slen <= 0 { if slen, err := db.SCard(k); err != nil || slen <= 0 {
return 0, err return 0, err
} else {
return 1, nil
} }
return 1, nil
} }
adp.del = db.SClear adp.del = db.SClear
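The listAdaptor/hashAdaptor/zsetAdaptor/setAdaptor helpers above all populate the same function-valued fields so that type-agnostic code can drive any data type through one shape. A minimal sketch of that pattern; the field and constructor names here are illustrative, not the repository's actual definitions.

package example

// adaptor bundles per-type operations behind plain function fields,
// so callers can test existence or delete keys without knowing the concrete type.
type adaptor struct {
	exists func(key []byte) (int64, error)
	del    func(key []byte) (int64, error)
}

// newKVAdaptor wires the fields to some store's operations (hypothetical helpers).
func newKVAdaptor(get func([]byte) ([]byte, error), del func([]byte) (int64, error)) *adaptor {
	a := new(adaptor)
	a.exists = func(k []byte) (int64, error) {
		v, err := get(k)
		if err != nil || v == nil {
			return 0, err
		}
		return 1, nil
	}
	a.del = del
	return a
}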

View File

@ -10,6 +10,7 @@ import (
"github.com/siddontang/ledisdb/store" "github.com/siddontang/ledisdb/store"
) )
// Constants for the zset type.
const ( const (
MinScore int64 = -1<<63 + 1 MinScore int64 = -1<<63 + 1
MaxScore int64 = 1<<63 - 1 MaxScore int64 = 1<<63 - 1
@ -20,6 +21,7 @@ const (
AggregateMax byte = 2 AggregateMax byte = 2
) )
// ScorePair is the pair of score and member.
type ScorePair struct { type ScorePair struct {
Score int64 Score int64
Member []byte Member []byte
@ -238,7 +240,7 @@ func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64,
return 0, errScoreOverflow return 0, errScoreOverflow
} }
var exists int64 = 0 var exists int64
ek := db.zEncodeSetKey(key, member) ek := db.zEncodeSetKey(key, member)
if v, err := db.bucket.Get(ek); err != nil { if v, err := db.bucket.Get(ek); err != nil {
@ -246,13 +248,14 @@ func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64,
} else if v != nil { } else if v != nil {
exists = 1 exists = 1
if s, err := Int64(v, err); err != nil { s, err := Int64(v, err)
if err != nil {
return 0, err return 0, err
} else { }
sk := db.zEncodeScoreKey(key, member, s) sk := db.zEncodeScoreKey(key, member, s)
t.Delete(sk) t.Delete(sk)
} }
}
t.Put(ek, PutInt64(score)) t.Put(ek, PutInt64(score))
@ -273,14 +276,14 @@ func (db *DB) zDelItem(t *batch, key []byte, member []byte, skipDelScore bool) (
//exists //exists
if !skipDelScore { if !skipDelScore {
//we must del score //we must del score
if s, err := Int64(v, err); err != nil { s, err := Int64(v, err)
if err != nil {
return 0, err return 0, err
} else { }
sk := db.zEncodeScoreKey(key, member, s) sk := db.zEncodeScoreKey(key, member, s)
t.Delete(sk) t.Delete(sk)
} }
} }
}
t.Delete(ek) t.Delete(ek)
@ -300,15 +303,17 @@ func (db *DB) zExpireAt(key []byte, when int64) (int64, error) {
if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 { if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 {
return 0, err return 0, err
} else { }
db.expireAt(t, ZSetType, key, when) db.expireAt(t, ZSetType, key, when)
if err := t.Commit(); err != nil { if err := t.Commit(); err != nil {
return 0, err return 0, err
} }
}
return 1, nil return 1, nil
} }
// ZAdd adds the members with their scores.
func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) {
if len(args) == 0 { if len(args) == 0 {
return 0, nil return 0, nil
@ -318,7 +323,7 @@ func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) {
t.Lock() t.Lock()
defer t.Unlock() defer t.Unlock()
var num int64 = 0 var num int64
for i := 0; i < len(args); i++ { for i := 0; i < len(args); i++ {
score := args[i].Score score := args[i].Score
member := args[i].Member member := args[i].Member
@ -349,7 +354,7 @@ func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) {
size, err := Int64(db.bucket.Get(sk)) size, err := Int64(db.bucket.Get(sk))
if err != nil { if err != nil {
return 0, err return 0, err
} else { }
size += delta size += delta
if size <= 0 { if size <= 0 {
size = 0 size = 0
@ -358,11 +363,11 @@ func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) {
} else { } else {
t.Put(sk, PutInt64(size)) t.Put(sk, PutInt64(size))
} }
}
return size, nil return size, nil
} }
// ZCard gets the size of the zset.
func (db *DB) ZCard(key []byte) (int64, error) { func (db *DB) ZCard(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -372,12 +377,13 @@ func (db *DB) ZCard(key []byte) (int64, error) {
return Int64(db.bucket.Get(sk)) return Int64(db.bucket.Get(sk))
} }
// ZScore gets the score of member.
func (db *DB) ZScore(key []byte, member []byte) (int64, error) { func (db *DB) ZScore(key []byte, member []byte) (int64, error) {
if err := checkZSetKMSize(key, member); err != nil { if err := checkZSetKMSize(key, member); err != nil {
return InvalidScore, err return InvalidScore, err
} }
var score int64 = InvalidScore score := InvalidScore
k := db.zEncodeSetKey(key, member) k := db.zEncodeSetKey(key, member)
if v, err := db.bucket.Get(k); err != nil { if v, err := db.bucket.Get(k); err != nil {
@ -393,6 +399,7 @@ func (db *DB) ZScore(key []byte, member []byte) (int64, error) {
return score, nil return score, nil
} }
// ZRem removes the members.
func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) {
if len(members) == 0 { if len(members) == 0 {
return 0, nil return 0, nil
@ -402,7 +409,7 @@ func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) {
t.Lock() t.Lock()
defer t.Unlock() defer t.Unlock()
var num int64 = 0 var num int64
for i := 0; i < len(members); i++ { for i := 0; i < len(members); i++ {
if err := checkZSetKMSize(key, members[i]); err != nil { if err := checkZSetKMSize(key, members[i]); err != nil {
return 0, err return 0, err
@ -423,6 +430,7 @@ func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) {
return num, err return num, err
} }
// ZIncrBy increases the score of member by delta.
func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) {
if err := checkZSetKMSize(key, member); err != nil { if err := checkZSetKMSize(key, member); err != nil {
return InvalidScore, err return InvalidScore, err
@ -434,7 +442,7 @@ func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) {
ek := db.zEncodeSetKey(key, member) ek := db.zEncodeSetKey(key, member)
var oldScore int64 = 0 var oldScore int64
v, err := db.bucket.Get(ek) v, err := db.bucket.Get(ek)
if err != nil { if err != nil {
return InvalidScore, err return InvalidScore, err
@ -465,6 +473,7 @@ func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) {
return newScore, err return newScore, err
} }
// ZCount gets the number of members with scores in [min, max].
func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) { func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -475,7 +484,7 @@ func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) {
rangeType := store.RangeROpen rangeType := store.RangeROpen
it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1) it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1)
var n int64 = 0 var n int64
for ; it.Valid(); it.Next() { for ; it.Valid(); it.Next() {
n++ n++
} }
@ -494,12 +503,15 @@ func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) {
it := db.bucket.NewIterator() it := db.bucket.NewIterator()
defer it.Close() defer it.Close()
if v := it.Find(k); v == nil { v := it.Find(k)
if v == nil {
return -1, nil return -1, nil
} else { }
if s, err := Int64(v, nil); err != nil {
s, err := Int64(v, nil)
if err != nil {
return 0, err return 0, err
} else { }
var rit *store.RangeLimitIterator var rit *store.RangeLimitIterator
sk := db.zEncodeScoreKey(key, member, s) sk := db.zEncodeScoreKey(key, member, s)
@ -507,14 +519,14 @@ func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) {
if !reverse { if !reverse {
minKey := db.zEncodeStartScoreKey(key, MinScore) minKey := db.zEncodeStartScoreKey(key, MinScore)
rit = store.NewRangeIterator(it, &store.Range{minKey, sk, store.RangeClose}) rit = store.NewRangeIterator(it, &store.Range{Min: minKey, Max: sk, Type: store.RangeClose})
} else { } else {
maxKey := db.zEncodeStopScoreKey(key, MaxScore) maxKey := db.zEncodeStopScoreKey(key, MaxScore)
rit = store.NewRevRangeIterator(it, &store.Range{sk, maxKey, store.RangeClose}) rit = store.NewRevRangeIterator(it, &store.Range{Min: sk, Max: maxKey, Type: store.RangeClose})
} }
var lastKey []byte = nil var lastKey []byte
var n int64 = 0 var n int64
for ; rit.Valid(); rit.Next() { for ; rit.Valid(); rit.Next() {
n++ n++
@ -526,8 +538,6 @@ func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) {
n-- n--
return n, nil return n, nil
} }
}
}
return -1, nil return -1, nil
} }
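The store.Range literals in this hunk now name their fields, which is what go vet's composite-literal check asks for: unkeyed fields in literals of imported struct types break silently if the field order ever changes. A tiny illustration with a stand-in struct, not the real store.Range definition:

package example

// rng stands in for a struct like store.Range.
type rng struct {
	Min  []byte
	Max  []byte
	Type uint8
}

func ranges(min, max []byte) (unkeyed, keyed rng) {
	// Unkeyed: relies on field order; go vet flags this pattern for imported struct types.
	unkeyed = rng{min, max, 0}

	// Keyed: robust to field reordering and self-documenting.
	keyed = rng{Min: min, Max: max, Type: 0}
	return
}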
@ -538,9 +548,8 @@ func (db *DB) zIterator(key []byte, min int64, max int64, offset int, count int,
if !reverse { if !reverse {
return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
} else {
return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
} }
return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
} }
func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) { func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) {
@ -549,7 +558,7 @@ func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int,
} }
it := db.zIterator(key, min, max, offset, count, false) it := db.zIterator(key, min, max, offset, count, false)
var num int64 = 0 var num int64
for ; it.Valid(); it.Next() { for ; it.Valid(); it.Next() {
sk := it.RawKey() sk := it.RawKey()
_, m, _, err := db.zDecodeScoreKey(sk) _, m, _, err := db.zDecodeScoreKey(sk)
@ -659,6 +668,7 @@ func (db *DB) zParseLimit(key []byte, start int, stop int) (offset int, count in
return return
} }
// ZClear clears the zset.
func (db *DB) ZClear(key []byte) (int64, error) { func (db *DB) ZClear(key []byte) (int64, error) {
t := db.zsetBatch t := db.zsetBatch
t.Lock() t.Lock()
@ -672,6 +682,7 @@ func (db *DB) ZClear(key []byte) (int64, error) {
return rmCnt, err return rmCnt, err
} }
// ZMclear clears multiple zsets.
func (db *DB) ZMclear(keys ...[]byte) (int64, error) { func (db *DB) ZMclear(keys ...[]byte) (int64, error) {
t := db.zsetBatch t := db.zsetBatch
t.Lock() t.Lock()
@ -688,21 +699,25 @@ func (db *DB) ZMclear(keys ...[]byte) (int64, error) {
return int64(len(keys)), err return int64(len(keys)), err
} }
// ZRange gets the members from start to stop.
func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) { func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) {
return db.ZRangeGeneric(key, start, stop, false) return db.ZRangeGeneric(key, start, stop, false)
} }
//min and max must be inclusive // ZRangeByScore gets the members with scores in [min, max].
//if no limit, set offset = 0 and count = -1 // min and max must be inclusive
// if no limit, set offset = 0 and count = -1
func (db *DB) ZRangeByScore(key []byte, min int64, max int64, func (db *DB) ZRangeByScore(key []byte, min int64, max int64,
offset int, count int) ([]ScorePair, error) { offset int, count int) ([]ScorePair, error) {
return db.ZRangeByScoreGeneric(key, min, max, offset, count, false) return db.ZRangeByScoreGeneric(key, min, max, offset, count, false)
} }
// ZRank gets the rank of member.
func (db *DB) ZRank(key []byte, member []byte) (int64, error) { func (db *DB) ZRank(key []byte, member []byte) (int64, error) {
return db.zrank(key, member, false) return db.zrank(key, member, false)
} }
// ZRemRangeByRank removes the members with rank in the range [start, stop].
func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) { func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) {
offset, count, err := db.zParseLimit(key, start, stop) offset, count, err := db.zParseLimit(key, start, stop)
if err != nil { if err != nil {
@ -723,7 +738,7 @@ func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) {
return rmCnt, err return rmCnt, err
} }
//min and max must be inclusive // ZRemRangeByScore removes the members with scores in [min, max].
func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) { func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) {
t := db.zsetBatch t := db.zsetBatch
t.Lock() t.Lock()
@ -737,20 +752,24 @@ func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error)
return rmCnt, err return rmCnt, err
} }
// ZRevRange gets the members in reverse order.
func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) { func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) {
return db.ZRangeGeneric(key, start, stop, true) return db.ZRangeGeneric(key, start, stop, true)
} }
// ZRevRank gets the reverse rank of member.
func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) { func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) {
return db.zrank(key, member, true) return db.zrank(key, member, true)
} }
//min and max must be inclusive // ZRevRangeByScore gets the members with scores in [min, max], in reverse order.
//if no limit, set offset = 0 and count = -1 // min and max must be inclusive
// if no limit, set offset = 0 and count = -1
func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) { func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) {
return db.ZRangeByScoreGeneric(key, min, max, offset, count, true) return db.ZRangeByScoreGeneric(key, min, max, offset, count, true)
} }
// ZRangeGeneric is a generic function for scanning the zset.
func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) { func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) {
offset, count, err := db.zParseLimit(key, start, stop) offset, count, err := db.zParseLimit(key, start, stop)
if err != nil { if err != nil {
@ -760,8 +779,9 @@ func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]Sc
return db.zRange(key, MinScore, MaxScore, offset, count, reverse) return db.zRange(key, MinScore, MaxScore, offset, count, reverse)
} }
//min and max must be inclusive // ZRangeByScoreGeneric is a generic function to scan the zset by score.
//if no limit, set offset = 0 and count = -1 // min and max must be inclusive
// if no limit, set offset = 0 and count = -1
func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64, func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64,
offset int, count int, reverse bool) ([]ScorePair, error) { offset int, count int, reverse bool) ([]ScorePair, error) {
@ -775,6 +795,7 @@ func (db *DB) zFlush() (drop int64, err error) {
return db.flushType(t, ZSetType) return db.flushType(t, ZSetType)
} }
// ZExpire expires the zset.
func (db *DB) ZExpire(key []byte, duration int64) (int64, error) { func (db *DB) ZExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 { if duration <= 0 {
return 0, errExpireValue return 0, errExpireValue
@ -783,6 +804,7 @@ func (db *DB) ZExpire(key []byte, duration int64) (int64, error) {
return db.zExpireAt(key, time.Now().Unix()+duration) return db.zExpireAt(key, time.Now().Unix()+duration)
} }
// ZExpireAt expires the zset at when.
func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) { func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() { if when <= time.Now().Unix() {
return 0, errExpireValue return 0, errExpireValue
@ -791,6 +813,7 @@ func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) {
return db.zExpireAt(key, when) return db.zExpireAt(key, when)
} }
// ZTTL gets the TTL of the zset.
func (db *DB) ZTTL(key []byte) (int64, error) { func (db *DB) ZTTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return -1, err return -1, err
@ -799,6 +822,7 @@ func (db *DB) ZTTL(key []byte) (int64, error) {
return db.ttl(ZSetType, key) return db.ttl(ZSetType, key)
} }
// ZPersist removes the TTL of the zset.
func (db *DB) ZPersist(key []byte) (int64, error) { func (db *DB) ZPersist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
@ -841,6 +865,7 @@ func getAggregateFunc(aggregate byte) func(int64, int64) int64 {
return nil return nil
} }
// ZUnionStore unions the zsets and stores the result in the destination zset.
func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) {
var destMap = map[string]int64{} var destMap = map[string]int64{}
@ -902,6 +927,7 @@ func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, agg
return n, nil return n, nil
} }
// ZInterStore intersects the zsets and stores the result in the destination zset.
func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) {
aggregateFunc := getAggregateFunc(aggregate) aggregateFunc := getAggregateFunc(aggregate)
@ -960,7 +986,7 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg
} }
} }
var n int64 = int64(len(destMap)) n := int64(len(destMap))
sk := db.zEncodeSizeKey(destKey) sk := db.zEncodeSizeKey(destKey)
t.Put(sk, PutInt64(n)) t.Put(sk, PutInt64(n))
@ -970,6 +996,7 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg
return n, nil return n, nil
} }
// ZRangeByLex scans the zset lexicographically.
func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) { func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) {
if min == nil { if min == nil {
min = db.zEncodeStartSetKey(key) min = db.zEncodeStartSetKey(key)
@ -995,6 +1022,7 @@ func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, o
return ay, nil return ay, nil
} }
// ZRemRangeByLex removes the members in [min, max] lexicographically.
func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) {
if min == nil { if min == nil {
min = db.zEncodeStartSetKey(key) min = db.zEncodeStartSetKey(key)
@ -1014,7 +1042,7 @@ func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8
it := db.bucket.RangeIterator(min, max, rangeType) it := db.bucket.RangeIterator(min, max, rangeType)
defer it.Close() defer it.Close()
var n int64 = 0 var n int64
for ; it.Valid(); it.Next() { for ; it.Valid(); it.Next() {
t.Delete(it.RawKey()) t.Delete(it.RawKey())
n++ n++
@ -1027,6 +1055,7 @@ func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8
return n, nil return n, nil
} }
// ZLexCount counts the members in the lexicographic range [min, max].
func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) {
if min == nil { if min == nil {
min = db.zEncodeStartSetKey(key) min = db.zEncodeStartSetKey(key)
@ -1042,7 +1071,7 @@ func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (in
it := db.bucket.RangeIterator(min, max, rangeType) it := db.bucket.RangeIterator(min, max, rangeType)
defer it.Close() defer it.Close()
var n int64 = 0 var n int64
for ; it.Valid(); it.Next() { for ; it.Valid(); it.Next() {
n++ n++
} }
@ -1050,6 +1079,7 @@ func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (in
return n, nil return n, nil
} }
// ZKeyExists checks whether the zset exists.
func (db *DB) ZKeyExists(key []byte) (int64, error) { func (db *DB) ZKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil { if err := checkKeySize(key); err != nil {
return 0, err return 0, err
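A brief usage sketch for the zset API documented in this file, again assuming db is a *ledis.DB obtained as in the earlier examples; keys and scores are invented for illustration.

package example

import (
	"fmt"

	"github.com/siddontang/ledisdb/ledis"
)

func zsetExample(db *ledis.DB) error {
	key := []byte("zset:scores")

	// ZAdd takes ScorePair values; it returns how many members were new.
	_, err := db.ZAdd(key,
		ledis.ScorePair{Score: 10, Member: []byte("alice")},
		ledis.ScorePair{Score: 30, Member: []byte("bob")},
	)
	if err != nil {
		return err
	}

	// ZRangeByScore is inclusive on both ends; offset = 0, count = -1 means no limit.
	pairs, err := db.ZRangeByScore(key, 0, 100, 0, -1)
	if err != nil {
		return err
	}
	for _, p := range pairs {
		fmt.Printf("%s -> %d\n", p.Member, p.Score)
	}

	// ZRank is ordered by ascending score; ZRevRank counts from the highest score.
	rank, err := db.ZRank(key, []byte("bob"))
	fmt.Println("rank of bob:", rank)
	return err
}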

View File

@ -155,7 +155,7 @@ func TestZSetOrder(t *testing.T) {
} else { } else {
for i := 0; i < membCnt; i++ { for i := 0; i < membCnt; i++ {
if string(qMembs[i].Member) != membs[i] { if string(qMembs[i].Member) != membs[i] {
t.Fatal(fmt.Sprintf("[%s] vs [%s]", qMembs[i], membs[i])) t.Fatalf("[%v] vs [%v]", qMembs[i], membs[i])
} }
} }
} }

View File

@ -15,6 +15,7 @@ var errIntNumber = errors.New("invalid integer")
Maybe I was foolish at that time. Maybe I was foolish at that time.
*/ */
// Int64 gets a 64-bit integer in little-endian format.
func Int64(v []byte, err error) (int64, error) { func Int64(v []byte, err error) (int64, error) {
if err != nil { if err != nil {
return 0, err return 0, err
@ -27,6 +28,7 @@ func Int64(v []byte, err error) (int64, error) {
return int64(binary.LittleEndian.Uint64(v)), nil return int64(binary.LittleEndian.Uint64(v)), nil
} }
// Uint64 gets an unsigned 64-bit integer in little-endian format.
func Uint64(v []byte, err error) (uint64, error) { func Uint64(v []byte, err error) (uint64, error) {
if err != nil { if err != nil {
return 0, err return 0, err
@ -39,12 +41,14 @@ func Uint64(v []byte, err error) (uint64, error) {
return binary.LittleEndian.Uint64(v), nil return binary.LittleEndian.Uint64(v), nil
} }
// PutInt64 encodes the 64-bit integer in little-endian format.
func PutInt64(v int64) []byte { func PutInt64(v int64) []byte {
b := make([]byte, 8) b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(v)) binary.LittleEndian.PutUint64(b, uint64(v))
return b return b
} }
// StrInt64 parses a 64-bit integer from its string form.
func StrInt64(v []byte, err error) (int64, error) { func StrInt64(v []byte, err error) (int64, error) {
if err != nil { if err != nil {
return 0, err return 0, err
@ -55,6 +59,7 @@ func StrInt64(v []byte, err error) (int64, error) {
} }
} }
// StrUint64 parses an unsigned 64-bit integer from its string form.
func StrUint64(v []byte, err error) (uint64, error) { func StrUint64(v []byte, err error) (uint64, error) {
if err != nil { if err != nil {
return 0, err return 0, err
@ -65,6 +70,7 @@ func StrUint64(v []byte, err error) (uint64, error) {
} }
} }
// StrInt32 parses a 32-bit integer from its string form.
func StrInt32(v []byte, err error) (int32, error) { func StrInt32(v []byte, err error) (int32, error) {
if err != nil { if err != nil {
return 0, err return 0, err
@ -76,6 +82,7 @@ func StrInt32(v []byte, err error) (int32, error) {
} }
} }
// StrInt8 parses an 8-bit integer from its string form.
func StrInt8(v []byte, err error) (int8, error) { func StrInt8(v []byte, err error) (int8, error) {
if err != nil { if err != nil {
return 0, err return 0, err
@ -87,6 +94,7 @@ func StrInt8(v []byte, err error) (int8, error) {
} }
} }
// AsyncNotify notifies the channel.
func AsyncNotify(ch chan struct{}) { func AsyncNotify(ch chan struct{}) {
select { select {
case ch <- struct{}{}: case ch <- struct{}{}:
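The decoders above take a (value, error) pair so they can wrap a store Get call directly. A small usage sketch of the encode/decode round trip, assuming these helpers are exported from the ledis package as the file's location suggests; the db.Get line in the comment is only a hypothetical usage shape.

package example

import (
	"fmt"

	"github.com/siddontang/ledisdb/ledis"
)

func numExample() {
	// PutInt64 encodes to 8 little-endian bytes; Int64 reverses it.
	raw := ledis.PutInt64(42)

	// The (value, error) signature lets decoders wrap a Get call in one line,
	// e.g. n, err := ledis.Int64(db.Get(key)) -- hypothetical usage shape.
	n, err := ledis.Int64(raw, nil)
	fmt.Println(n, err) // 42 <nil>

	// StrInt64 parses the textual form instead of the binary one.
	s, err := ledis.StrInt64([]byte("12345"), nil)
	fmt.Println(s, err)
}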

View File

@ -234,78 +234,78 @@ func TestHashErrorParams(t *testing.T) {
defer c.Close() defer c.Close()
if _, err := c.Do("hset", "test_hset"); err == nil { if _, err := c.Do("hset", "test_hset"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hget", "test_hget"); err == nil { if _, err := c.Do("hget", "test_hget"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hexists", "test_hexists"); err == nil { if _, err := c.Do("hexists", "test_hexists"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hdel", "test_hdel"); err == nil { if _, err := c.Do("hdel", "test_hdel"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hlen", "test_hlen", "a"); err == nil { if _, err := c.Do("hlen", "test_hlen", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hincrby", "test_hincrby"); err == nil { if _, err := c.Do("hincrby", "test_hincrby"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hmset", "test_hmset"); err == nil { if _, err := c.Do("hmset", "test_hmset"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hmset", "test_hmset", "f1", "v1", "f2"); err == nil { if _, err := c.Do("hmset", "test_hmset", "f1", "v1", "f2"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hmget", "test_hget"); err == nil { if _, err := c.Do("hmget", "test_hget"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hgetall"); err == nil { if _, err := c.Do("hgetall"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hkeys"); err == nil { if _, err := c.Do("hkeys"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hvals"); err == nil { if _, err := c.Do("hvals"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hclear"); err == nil { if _, err := c.Do("hclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hclear", "test_hclear", "a"); err == nil { if _, err := c.Do("hclear", "test_hclear", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hmclear"); err == nil { if _, err := c.Do("hmclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hexpire", "test_hexpire"); err == nil { if _, err := c.Do("hexpire", "test_hexpire"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hexpireat", "test_hexpireat"); err == nil { if _, err := c.Do("hexpireat", "test_hexpireat"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("httl"); err == nil { if _, err := c.Do("httl"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("hpersist"); err == nil { if _, err := c.Do("hpersist"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
} }
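The only change in these test files is swapping t.Fatal for t.Fatalf wherever the message contains a format verb, which is what go vet's printf check reports. A minimal illustration of the difference, using a generic helper rather than code from these tests:

package example

import "testing"

func checkErr(t *testing.T, err error) {
	t.Helper()

	// go vet's printf check warns when a %v verb is passed to t.Fatal,
	// which does not interpret format strings, so the verb would print literally:
	// t.Fatal("invalid err of %v", err)

	// t.Fatalf formats the message as intended.
	if err != nil {
		t.Fatalf("invalid err of %v", err)
	}
}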

View File

@ -248,43 +248,43 @@ func TestKVErrorParams(t *testing.T) {
} }
if _, err := c.Do("del"); err == nil { if _, err := c.Do("del"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("mset"); err == nil { if _, err := c.Do("mset"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("mset", "a", "b", "c"); err == nil { if _, err := c.Do("mset", "a", "b", "c"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("mget"); err == nil { if _, err := c.Do("mget"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("expire"); err == nil { if _, err := c.Do("expire"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("expire", "a", "b"); err == nil { if _, err := c.Do("expire", "a", "b"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("expireat"); err == nil { if _, err := c.Do("expireat"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("expireat", "a", "b"); err == nil { if _, err := c.Do("expireat", "a", "b"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("ttl"); err == nil { if _, err := c.Do("ttl"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("persist"); err == nil { if _, err := c.Do("persist"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("setex", "a", "blah", "hello world"); err == nil { if _, err := c.Do("setex", "a", "blah", "hello world"); err == nil {

View File

@ -467,62 +467,62 @@ func TestListErrorParams(t *testing.T) {
defer c.Close() defer c.Close()
if _, err := c.Do("lpush", "test_lpush"); err == nil { if _, err := c.Do("lpush", "test_lpush"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("rpush", "test_rpush"); err == nil { if _, err := c.Do("rpush", "test_rpush"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lpop", "test_lpop", "a"); err == nil { if _, err := c.Do("lpop", "test_lpop", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("rpop", "test_rpop", "a"); err == nil { if _, err := c.Do("rpop", "test_rpop", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("llen", "test_llen", "a"); err == nil { if _, err := c.Do("llen", "test_llen", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lindex", "test_lindex"); err == nil { if _, err := c.Do("lindex", "test_lindex"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lrange", "test_lrange"); err == nil { if _, err := c.Do("lrange", "test_lrange"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lclear"); err == nil { if _, err := c.Do("lclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lmclear"); err == nil { if _, err := c.Do("lmclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lexpire"); err == nil { if _, err := c.Do("lexpire"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lexpireat"); err == nil { if _, err := c.Do("lexpireat"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lttl"); err == nil { if _, err := c.Do("lttl"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("lpersist"); err == nil { if _, err := c.Do("lpersist"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("ltrim_front", "test_ltrimfront", "-1"); err == nil { if _, err := c.Do("ltrim_front", "test_ltrimfront", "-1"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("ltrim_back", "test_ltrimback", "a"); err == nil { if _, err := c.Do("ltrim_back", "test_ltrimback", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
} }

View File

@ -118,99 +118,99 @@ func TestSetErrorParams(t *testing.T) {
defer c.Close() defer c.Close()
if _, err := c.Do("sadd", "test_sadd"); err == nil { if _, err := c.Do("sadd", "test_sadd"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("scard"); err == nil { if _, err := c.Do("scard"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("scard", "k1", "k2"); err == nil { if _, err := c.Do("scard", "k1", "k2"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sdiff"); err == nil { if _, err := c.Do("sdiff"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sdiffstore", "dstkey"); err == nil { if _, err := c.Do("sdiffstore", "dstkey"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sinter"); err == nil { if _, err := c.Do("sinter"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sinterstore", "dstkey"); err == nil { if _, err := c.Do("sinterstore", "dstkey"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sunion"); err == nil { if _, err := c.Do("sunion"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sunionstore", "dstkey"); err == nil { if _, err := c.Do("sunionstore", "dstkey"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sismember", "k1"); err == nil { if _, err := c.Do("sismember", "k1"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sismember", "k1", "m1", "m2"); err == nil { if _, err := c.Do("sismember", "k1", "m1", "m2"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("smembers"); err == nil { if _, err := c.Do("smembers"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("smembers", "k1", "k2"); err == nil { if _, err := c.Do("smembers", "k1", "k2"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("srem"); err == nil { if _, err := c.Do("srem"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("srem", "key"); err == nil { if _, err := c.Do("srem", "key"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sclear"); err == nil { if _, err := c.Do("sclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sclear", "k1", "k2"); err == nil { if _, err := c.Do("sclear", "k1", "k2"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("smclear"); err == nil { if _, err := c.Do("smclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sexpire", "set_expire"); err == nil { if _, err := c.Do("sexpire", "set_expire"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sexpire", "set_expire", "aaa"); err == nil { if _, err := c.Do("sexpire", "set_expire", "aaa"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sexpireat", "set_expireat"); err == nil { if _, err := c.Do("sexpireat", "set_expireat"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sexpireat", "set_expireat", "aaa"); err == nil { if _, err := c.Do("sexpireat", "set_expireat", "aaa"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("sttl"); err == nil { if _, err := c.Do("sttl"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("spersist"); err == nil { if _, err := c.Do("spersist"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
} }

View File

@ -451,166 +451,166 @@ func TestZsetErrorParams(t *testing.T) {
//zadd //zadd
if _, err := c.Do("zadd", "test_zadd"); err == nil { if _, err := c.Do("zadd", "test_zadd"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zadd", "test_zadd", "a", "b", "c"); err == nil { if _, err := c.Do("zadd", "test_zadd", "a", "b", "c"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zadd", "test_zadd", "-a", "a"); err == nil { if _, err := c.Do("zadd", "test_zadd", "-a", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zadd", "test_zad", "0.1", "a"); err == nil { if _, err := c.Do("zadd", "test_zad", "0.1", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zcard //zcard
if _, err := c.Do("zcard"); err == nil { if _, err := c.Do("zcard"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zscore //zscore
if _, err := c.Do("zscore", "test_zscore"); err == nil { if _, err := c.Do("zscore", "test_zscore"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrem //zrem
if _, err := c.Do("zrem", "test_zrem"); err == nil { if _, err := c.Do("zrem", "test_zrem"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zincrby //zincrby
if _, err := c.Do("zincrby", "test_zincrby"); err == nil { if _, err := c.Do("zincrby", "test_zincrby"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zincrby", "test_zincrby", 0.1, "a"); err == nil { if _, err := c.Do("zincrby", "test_zincrby", 0.1, "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zcount //zcount
if _, err := c.Do("zcount", "test_zcount"); err == nil { if _, err := c.Do("zcount", "test_zcount"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zcount", "test_zcount", "-inf", "=inf"); err == nil { if _, err := c.Do("zcount", "test_zcount", "-inf", "=inf"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zcount", "test_zcount", 0.1, 0.1); err == nil { if _, err := c.Do("zcount", "test_zcount", 0.1, 0.1); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrank //zrank
if _, err := c.Do("zrank", "test_zrank"); err == nil { if _, err := c.Do("zrank", "test_zrank"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrevzrank //zrevzrank
if _, err := c.Do("zrevrank", "test_zrevrank"); err == nil { if _, err := c.Do("zrevrank", "test_zrevrank"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zremrangebyrank //zremrangebyrank
if _, err := c.Do("zremrangebyrank", "test_zremrangebyrank"); err == nil { if _, err := c.Do("zremrangebyrank", "test_zremrangebyrank"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zremrangebyrank", "test_zremrangebyrank", 0.1, 0.1); err == nil { if _, err := c.Do("zremrangebyrank", "test_zremrangebyrank", 0.1, 0.1); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zremrangebyscore //zremrangebyscore
if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore"); err == nil { if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore", "-inf", "a"); err == nil { if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore", "-inf", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore", 0, "a"); err == nil { if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore", 0, "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrange //zrange
if _, err := c.Do("zrange", "test_zrange"); err == nil { if _, err := c.Do("zrange", "test_zrange"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrange", "test_zrange", 0, 1, "withscore"); err == nil { if _, err := c.Do("zrange", "test_zrange", 0, 1, "withscore"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrange", "test_zrange", 0, 1, "withscores", "a"); err == nil { if _, err := c.Do("zrange", "test_zrange", 0, 1, "withscores", "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrevrange, almost same as zrange //zrevrange, almost same as zrange
if _, err := c.Do("zrevrange", "test_zrevrange"); err == nil { if _, err := c.Do("zrevrange", "test_zrevrange"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrangebyscore //zrangebyscore
if _, err := c.Do("zrangebyscore", "test_zrangebyscore"); err == nil { if _, err := c.Do("zrangebyscore", "test_zrangebyscore"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscore"); err == nil { if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscore"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit"); err == nil { if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limi", 1, 1); err == nil { if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limi", 1, 1); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit", "a", 1); err == nil { if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit", "a", 1); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit", 1, "a"); err == nil { if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit", 1, "a"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zrevrangebyscore, almost same as zrangebyscore //zrevrangebyscore, almost same as zrangebyscore
if _, err := c.Do("zrevrangebyscore", "test_zrevrangebyscore"); err == nil { if _, err := c.Do("zrevrangebyscore", "test_zrevrangebyscore"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zclear //zclear
if _, err := c.Do("zclear"); err == nil { if _, err := c.Do("zclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zmclear //zmclear
if _, err := c.Do("zmclear"); err == nil { if _, err := c.Do("zmclear"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zexpire //zexpire
if _, err := c.Do("zexpire", "test_zexpire"); err == nil { if _, err := c.Do("zexpire", "test_zexpire"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zexpireat //zexpireat
if _, err := c.Do("zexpireat", "test_zexpireat"); err == nil { if _, err := c.Do("zexpireat", "test_zexpireat"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zttl //zttl
if _, err := c.Do("zttl"); err == nil { if _, err := c.Do("zttl"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
//zpersist //zpersist
if _, err := c.Do("zpersist"); err == nil { if _, err := c.Do("zpersist"); err == nil {
t.Fatal("invalid err of %v", err) t.Fatalf("invalid err of %v", err)
} }
} }