From 0c604eb6841c679ea15e42ce00aad1b751dc89f0 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 29 Mar 2018 20:33:36 +0800 Subject: [PATCH] fix vet and lint (#337) --- cmd/ledis-cli/main.go | 2 +- ledis/const.go | 52 +++++++------ ledis/dump.go | 21 +++-- ledis/event.go | 118 ++++++++++++++++------------- ledis/ledis.go | 9 ++- ledis/ledis_db.go | 14 ++-- ledis/migrate.go | 6 ++ ledis/replication.go | 63 ++++++++------- ledis/scan.go | 12 ++- ledis/sort.go | 20 ++--- ledis/t_hash.go | 55 +++++++++----- ledis/t_kv.go | 57 ++++++++++---- ledis/t_list.go | 87 +++++++++++++-------- ledis/t_set.go | 53 ++++++++----- ledis/t_ttl.go | 20 +++-- ledis/t_ttl_test.go | 36 ++++----- ledis/t_zset.go | 164 ++++++++++++++++++++++++---------------- ledis/t_zset_test.go | 2 +- ledis/util.go | 8 ++ server/cmd_hash_test.go | 38 +++++----- server/cmd_kv_test.go | 20 ++--- server/cmd_list_test.go | 30 ++++---- server/cmd_set_test.go | 48 ++++++------ server/cmd_zset_test.go | 72 +++++++++--------- 24 files changed, 589 insertions(+), 418 deletions(-) diff --git a/cmd/ledis-cli/main.go b/cmd/ledis-cli/main.go index 5a3ef89..71ce579 100644 --- a/cmd/ledis-cli/main.go +++ b/cmd/ledis-cli/main.go @@ -199,7 +199,7 @@ func loadHisotry() { func saveHisotry() { if f, err := os.Create(historyPath); err != nil { - fmt.Printf("Error writing history file: ", err) + fmt.Printf("Error writing history file, err: %v", err) } else { line.WriteHistory(f) f.Close() diff --git a/ledis/const.go b/ledis/const.go index d35ca3b..610dada 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -4,8 +4,10 @@ import ( "errors" ) +// Version is for version const Version = "0.5" +// DataType is defined for the different types type DataType byte // for out use @@ -34,6 +36,7 @@ func (d DataType) String() string { } } +// For different type name const ( KVName = "KV" ListName = "LIST" @@ -71,24 +74,23 @@ const ( MetaType byte = 201 ) -var ( - TypeName = map[byte]string{ - KVType: "kv", - HashType: "hash", - HSizeType: "hsize", - ListType: "list", - LMetaType: "lmeta", - ZSetType: "zset", - ZSizeType: "zsize", - ZScoreType: "zscore", - // BitType: "bit", - // BitMetaType: "bitmeta", - SetType: "set", - SSizeType: "ssize", - ExpTimeType: "exptime", - ExpMetaType: "expmeta", - } -) +// TypeName is the map of type -> name +var TypeName = map[byte]string{ + KVType: "kv", + HashType: "hash", + HSizeType: "hsize", + ListType: "list", + LMetaType: "lmeta", + ZSetType: "zset", + ZSizeType: "zsize", + ZScoreType: "zscore", + // BitType: "bit", + // BitMetaType: "bitmeta", + SetType: "set", + SSizeType: "ssize", + ExpTimeType: "exptime", + ExpMetaType: "expmeta", +} const ( defaultScanCount int = 10 @@ -104,25 +106,28 @@ var ( errListIndex = errors.New("invalid list index") ) +// For different const size configuration const ( + // max allowed databases MaxDatabases int = 10240 - //max key size + // max key size MaxKeySize int = 1024 - //max hash field size + // max hash field size MaxHashFieldSize int = 1024 - //max zset member size + // max zset member size MaxZSetMemberSize int = 1024 - //max set member size + // max set member size MaxSetMemberSize int = 1024 - //max value size + // max value size MaxValueSize int = 1024 * 1024 * 1024 ) +// For different common errors var ( ErrScoreMiss = errors.New("zset score miss") ErrWriteInROnly = errors.New("write not support in readonly mode") @@ -136,6 +141,7 @@ var ( // DBInMulti uint8 = 0x2 // ) +// For bit operation const ( BitAND = "and" BitOR = "or" diff --git a/ledis/dump.go b/ledis/dump.go index 
3e01ec2..9784575 100644 --- a/ledis/dump.go +++ b/ledis/dump.go @@ -11,26 +11,22 @@ import ( "github.com/siddontang/ledisdb/store" ) +// DumpHead is the head of a dump. type DumpHead struct { CommitID uint64 } +// Read reads meta from the Reader. func (h *DumpHead) Read(r io.Reader) error { - if err := binary.Read(r, binary.BigEndian, &h.CommitID); err != nil { - return err - } - - return nil + return binary.Read(r, binary.BigEndian, &h.CommitID) } +// Write writes meta to the Writer func (h *DumpHead) Write(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, h.CommitID); err != nil { - return err - } - - return nil + return binary.Write(w, binary.BigEndian, h.CommitID) } +// DumpFile dumps data to the file func (l *Ledis) DumpFile(path string) error { f, err := os.Create(path) if err != nil { @@ -41,6 +37,7 @@ func (l *Ledis) DumpFile(path string) error { return l.Dump(f) } +// Dump dumps data to the Writer. func (l *Ledis) Dump(w io.Writer) error { var err error @@ -118,7 +115,7 @@ func (l *Ledis) Dump(w io.Writer) error { return nil } -// clear all data and load dump file to db +// LoadDumpFile clears all data and loads dump file to db func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) { f, err := os.Open(path) if err != nil { @@ -129,7 +126,7 @@ func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) { return l.LoadDump(f) } -// clear all data and load dump file to db +// LoadDump clears all data and loads dump file to db func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) { l.wLock.Lock() defer l.wLock.Unlock() diff --git a/ledis/event.go b/ledis/event.go index d14309d..f35c893 100644 --- a/ledis/event.go +++ b/ledis/event.go @@ -29,95 +29,105 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { switch k[1] { case KVType: - if key, err := db.decodeKVKey(k); err != nil { + key, err := db.decodeKVKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) } + buf = strconv.AppendQuote(buf, hack.String(key)) case HashType: - if key, field, err := db.hDecodeHashKey(k); err != nil { + key, field, err := db.hDecodeHashKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(field)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(field)) case HSizeType: - if key, err := db.hDecodeSizeKey(k); err != nil { + key, err := db.hDecodeSizeKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) case ListType: - if key, seq, err := db.lDecodeListKey(k); err != nil { + key, seq, err := db.lDecodeListKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, int64(seq), 10) } + + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, int64(seq), 10) case LMetaType: - if key, err := db.lDecodeMetaKey(k); err != nil { + key, err := db.lDecodeMetaKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) case ZSetType: - if key, m, err := db.zDecodeSetKey(k); err != nil { + key, m, err := db.zDecodeSetKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, 
hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(m)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(m)) case ZSizeType: - if key, err := db.zDecodeSizeKey(k); err != nil { + key, err := db.zDecodeSizeKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) case ZScoreType: - if key, m, score, err := db.zDecodeScoreKey(k); err != nil { + key, m, score, err := db.zDecodeScoreKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(m)) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, score, 10) } + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(m)) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, score, 10) case SetType: - if key, member, err := db.sDecodeSetKey(k); err != nil { + key, member, err := db.sDecodeSetKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(member)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(member)) case SSizeType: - if key, err := db.sDecodeSizeKey(k); err != nil { + key, err := db.sDecodeSizeKey(k) + if err != nil { return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) } + + buf = strconv.AppendQuote(buf, hack.String(key)) case ExpTimeType: - if tp, key, t, err := db.expDecodeTimeKey(k); err != nil { + tp, key, t, err := db.expDecodeTimeKey(k) + if err != nil { return nil, err - } else { - buf = append(buf, TypeName[tp]...) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, t, 10) } + + buf = append(buf, TypeName[tp]...) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, t, 10) case ExpMetaType: - if tp, key, err := db.expDecodeMetaKey(k); err != nil { + tp, key, err := db.expDecodeMetaKey(k) + if err != nil { return nil, err - } else { - buf = append(buf, TypeName[tp]...) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(key)) } + + buf = append(buf, TypeName[tp]...) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(key)) default: return nil, errInvalidEvent } diff --git a/ledis/ledis.go b/ledis/ledis.go index 8d654ba..ab8543c 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -15,6 +15,7 @@ import ( "github.com/siddontang/ledisdb/store" ) +// Ledis is the core structure to handle the database. type Ledis struct { cfg *config.Config @@ -42,6 +43,7 @@ type Ledis struct { ttlCheckerCh chan *ttlChecker } +// Open opens the Ledis with a config. func Open(cfg *config.Config) (*Ledis, error) { if len(cfg.DataDir) == 0 { cfg.DataDir = config.DefaultDataDir @@ -96,6 +98,7 @@ func Open(cfg *config.Config) (*Ledis, error) { return l, nil } +// Close closes the Ledis. func (l *Ledis) Close() { close(l.quit) l.wg.Wait() @@ -113,6 +116,7 @@ func (l *Ledis) Close() { } } +// Select chooses a database. 
func (l *Ledis) Select(index int) (*DB, error) { if index < 0 || index >= l.cfg.Databases { return nil, fmt.Errorf("invalid db index %d, must in [0, %d]", index, l.cfg.Databases-1) @@ -136,7 +140,7 @@ func (l *Ledis) Select(index int) (*DB, error) { return db, nil } -// Flush All will clear all data and replication logs +// FlushAll will clear all data and replication logs func (l *Ledis) FlushAll() error { l.wLock.Lock() defer l.wLock.Unlock() @@ -181,6 +185,7 @@ func (l *Ledis) flushAll() error { return nil } +// IsReadOnly returns whether Ledis is read only or not. func (l *Ledis) IsReadOnly() bool { if l.cfg.GetReadonly() { return true @@ -229,10 +234,12 @@ func (l *Ledis) checkTTL() { } +// StoreStat returns the statistics. func (l *Ledis) StoreStat() *store.Stat { return l.ldb.Stat() } +// CompactStore compacts the backend storage. func (l *Ledis) CompactStore() error { l.wLock.Lock() defer l.wLock.Unlock() diff --git a/ledis/ledis_db.go b/ledis/ledis_db.go index 7b3ff0f..4d250b5 100644 --- a/ledis/ledis_db.go +++ b/ledis/ledis_db.go @@ -26,6 +26,7 @@ type ibucket interface { RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator } +// DB is the database. type DB struct { l *Ledis @@ -130,6 +131,7 @@ func (db *DB) newBatch() *batch { return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}) } +// Index gets the index of database. func (db *DB) Index() int { return int(db.index) } @@ -138,6 +140,7 @@ func (db *DB) Index() int { // return db.status == DBAutoCommit // } +// FlushAll flushes the data. func (db *DB) FlushAll() (drop int64, err error) { all := [...](func() (int64, error)){ db.flush, @@ -147,12 +150,13 @@ func (db *DB) FlushAll() (drop int64, err error) { db.sFlush} for _, flush := range all { - if n, e := flush(); e != nil { + n, e := flush() + if e != nil { err = e return - } else { - drop += n } + + drop += n } return @@ -195,9 +199,9 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { if err = t.Commit(); err != nil { return - } else { - drop += int64(len(keys)) } + + drop += int64(len(keys)) keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) } return diff --git a/ledis/migrate.go b/ledis/migrate.go index aca8a86..eea5b10 100644 --- a/ledis/migrate.go +++ b/ledis/migrate.go @@ -14,6 +14,7 @@ import ( Only support rdb version 6. 
*/ +// Dump dumps the KV value of key func (db *DB) Dump(key []byte) ([]byte, error) { v, err := db.Get(key) if err != nil { @@ -25,6 +26,7 @@ func (db *DB) Dump(key []byte) ([]byte, error) { return rdb.Dump(rdb.String(v)) } +// LDump dumps the list value of key func (db *DB) LDump(key []byte) ([]byte, error) { v, err := db.LRange(key, 0, -1) if err != nil { @@ -36,6 +38,7 @@ func (db *DB) LDump(key []byte) ([]byte, error) { return rdb.Dump(rdb.List(v)) } +// HDump dumps the hash value of key func (db *DB) HDump(key []byte) ([]byte, error) { v, err := db.HGetAll(key) if err != nil { @@ -53,6 +56,7 @@ func (db *DB) HDump(key []byte) ([]byte, error) { return rdb.Dump(o) } +// SDump dumps the set value of key func (db *DB) SDump(key []byte) ([]byte, error) { v, err := db.SMembers(key) if err != nil { @@ -64,6 +68,7 @@ func (db *DB) SDump(key []byte) ([]byte, error) { return rdb.Dump(rdb.Set(v)) } +// ZDump dumps the zset value of key func (db *DB) ZDump(key []byte) ([]byte, error) { v, err := db.ZRangeByScore(key, MinScore, MaxScore, 0, -1) if err != nil { @@ -81,6 +86,7 @@ func (db *DB) ZDump(key []byte) ([]byte, error) { return rdb.Dump(o) } +// Restore restores a key into database. func (db *DB) Restore(key []byte, ttl int64, data []byte) error { d, err := rdb.DecodeDump(data) if err != nil { diff --git a/ledis/replication.go b/ledis/replication.go index 20c2099..b09bdb5 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -16,10 +16,12 @@ const ( maxReplLogSize = 1 * 1024 * 1024 ) +// For replication error. var ( ErrLogMissed = errors.New("log is pured in server") ) +// ReplicationUsed returns whether replication is used or not. func (l *Ledis) ReplicationUsed() bool { return l.r != nil } @@ -38,40 +40,39 @@ func (l *Ledis) handleReplication() error { if err != rpl.ErrNoBehindLog { log.Errorf("get next commit log err, %s", err.Error) return err - } else { - return nil - } - } else { - l.rbatch.Rollback() - - if rl.Compression == 1 { - //todo optimize - if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil { - log.Errorf("decode log error %s", err.Error()) - return err - } } - if bd, err := store.NewBatchData(rl.Data); err != nil { - log.Errorf("decode batch log error %s", err.Error()) - return err - } else if err = bd.Replay(l.rbatch); err != nil { - log.Errorf("replay batch log error %s", err.Error()) - } + return nil + } - l.commitLock.Lock() - if err = l.rbatch.Commit(); err != nil { - log.Errorf("commit log error %s", err.Error()) - } else if err = l.r.UpdateCommitID(rl.ID); err != nil { - log.Errorf("update commit id error %s", err.Error()) - } + l.rbatch.Rollback() - l.commitLock.Unlock() - if err != nil { + if rl.Compression == 1 { + //todo optimize + if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil { + log.Errorf("decode log error %s", err.Error()) return err } } + if bd, err := store.NewBatchData(rl.Data); err != nil { + log.Errorf("decode batch log error %s", err.Error()) + return err + } else if err = bd.Replay(l.rbatch); err != nil { + log.Errorf("replay batch log error %s", err.Error()) + } + + l.commitLock.Lock() + if err = l.rbatch.Commit(); err != nil { + log.Errorf("commit log error %s", err.Error()) + } else if err = l.r.UpdateCommitID(rl.ID); err != nil { + log.Errorf("update commit id error %s", err.Error()) + } + + l.commitLock.Unlock() + if err != nil { + return err + } } } @@ -90,6 +91,7 @@ func (l *Ledis) onReplication() { } } +// WaitReplication waits replication done func (l *Ledis) WaitReplication() error { if !l.ReplicationUsed() { 
return ErrRplNotSupport @@ -117,6 +119,7 @@ func (l *Ledis) WaitReplication() error { return errors.New("wait replication too many times") } +// StoreLogsFromReader stores logs from the Reader func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { if !l.ReplicationUsed() { return ErrRplNotSupport @@ -150,12 +153,14 @@ func (l *Ledis) noticeReplication() { AsyncNotify(l.rc) } +// StoreLogsFromData stores logs from data. func (l *Ledis) StoreLogsFromData(data []byte) error { rb := bytes.NewReader(data) return l.StoreLogsFromReader(rb) } +// ReadLogsTo reads logs and writes them to the Writer. func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) { if !l.ReplicationUsed() { // no replication log @@ -205,7 +210,8 @@ func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uin return } -// try to read events, if no events read, try to wait the new event singal until timeout seconds +// ReadLogsToTimeout tries to read events, if no events read, +// tries to wait for the new event signal until timeout seconds func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w io.Writer, timeout int, quitCh chan struct{}) (n int, nextLogID uint64, err error) { n, nextLogID, err = l.ReadLogsTo(startLogID, w) if err != nil { @@ -229,8 +235,10 @@ func (l *Ledis) propagate(rl *rpl.Log) { } } +// NewLogEventHandler is the handler to handle new log event. type NewLogEventHandler func(rl *rpl.Log) +// AddNewLogEventHandler adds the handler for the new log event func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error { if !l.ReplicationUsed() { return ErrRplNotSupport @@ -241,6 +249,7 @@ func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error { return nil } +// ReplicationStat returns the statistics of replication. func (l *Ledis) ReplicationStat() (*rpl.Stat, error) { if !l.ReplicationUsed() { return nil, ErrRplNotSupport diff --git a/ledis/scan.go b/ledis/scan.go index c4540a6..b91640f 100644 --- a/ledis/scan.go +++ b/ledis/scan.go @@ -10,7 +10,7 @@ import ( var errDataType = errors.New("error data type") var errMetaKey = errors.New("error meta key") -//fif inclusive is true, scan range [cursor, inf) else (cursor, inf) +//Scan scans the data. If inclusive is true, scan range [cursor, inf) else (cursor, inf) func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { storeDataType, err := getDataStoreType(dataType) if err != nil { @@ -20,7 +20,7 @@ func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false) } -//if inclusive is true, revscan range (-inf, cursor] else (inf, cursor) +// RevScan scans the data reversed. If inclusive is true, revscan range (-inf, cursor] else (inf, cursor) func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { storeDataType, err := getDataStoreType(dataType) if err != nil { @@ -51,7 +51,7 @@ func getDataStoreType(dataType DataType) (byte, error) { func buildMatchRegexp(match string) (*regexp.Regexp, error) { var err error - var r *regexp.Regexp = nil + var r *regexp.Regexp if len(match) > 0 { if r, err = regexp.Compile(match); err != nil { @@ -300,10 +300,12 @@ func (db *DB) hScanGeneric(key []byte, cursor []byte, count int, inclusive bool, return v, nil } +// HScan scans data for hash. 
func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { return db.hScanGeneric(key, cursor, count, inclusive, match, false) } +// HRevScan reversed scans data for hash. func (db *DB) HRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { return db.hScanGeneric(key, cursor, count, inclusive, match, true) } @@ -341,10 +343,12 @@ func (db *DB) sScanGeneric(key []byte, cursor []byte, count int, inclusive bool, return v, nil } +// SScan scans data for set. func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { return db.sScanGeneric(key, cursor, count, inclusive, match, false) } +// SRevScan scans data reversed for set. func (db *DB) SRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { return db.sScanGeneric(key, cursor, count, inclusive, match, true) } @@ -387,10 +391,12 @@ func (db *DB) zScanGeneric(key []byte, cursor []byte, count int, inclusive bool, return v, nil } +// ZScan scans data for zset. func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) { return db.zScanGeneric(key, cursor, count, inclusive, match, false) } +// ZRevScan scans data reversed for zset. func (db *DB) ZRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) { return db.zScanGeneric(key, cursor, count, inclusive, match, true) } diff --git a/ledis/sort.go b/ledis/sort.go index 6a54c07..413db10 100644 --- a/ledis/sort.go +++ b/ledis/sort.go @@ -9,6 +9,7 @@ import ( "github.com/siddontang/ledisdb/store" ) +// Limit is for sort. type Limit struct { Offset int Size int @@ -52,7 +53,7 @@ func (db *DB) lookupKeyByPattern(pattern []byte, subKey []byte) []byte { } key := pattern - var field []byte = nil + var field []byte // Find out if we're dealing with a hash dereference if n := bytes.Index(pattern, hashPattern); n > 0 && n+3 < len(pattern) { @@ -109,17 +110,15 @@ func (s *sortItemSlice) Less(i, j int) bool { if s1.cmpValue == nil || s2.cmpValue == nil { if s1.cmpValue == nil { return true - } else { - return false } - } else { - // Unlike redis, we only use bytes compare - return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0 + return false } - } else { // Unlike redis, we only use bytes compare - return bytes.Compare(s1.value, s2.value) < 0 + return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0 } + + // Unlike redis, we only use bytes compare + return bytes.Compare(s1.value, s2.value) < 0 } } @@ -184,7 +183,7 @@ func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool } } - var resLen int = end - start + 1 + resLen := end - start + 1 if len(sortGet) > 0 { resLen = len(sortGet) * (end - start + 1) } @@ -204,6 +203,7 @@ func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool return res, nil } +// XLSort sorts list. func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { values, err := db.LRange(key, 0, -1) @@ -214,6 +214,7 @@ func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, so return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) } +// XSSort sorts set. 
func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { values, err := db.SMembers(key) if err != nil { @@ -223,6 +224,7 @@ func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, so return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) } +// XZSort sorts zset. func (db *DB) XZSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { values, err := db.ZRangeByLex(key, nil, nil, store.RangeClose, 0, -1) if err != nil { diff --git a/ledis/t_hash.go b/ledis/t_hash.go index c822e23..b9a8595 100644 --- a/ledis/t_hash.go +++ b/ledis/t_hash.go @@ -9,6 +9,7 @@ import ( "github.com/siddontang/ledisdb/store" ) +// FVPair is the pair of field and value. type FVPair struct { Field []byte Value []byte @@ -154,7 +155,7 @@ func (db *DB) hDelete(t *batch, key []byte) int64 { start := db.hEncodeStartKey(key) stop := db.hEncodeStopKey(key) - var num int64 = 0 + var num int64 it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) for ; it.Valid(); it.Next() { t.Delete(it.Key()) @@ -173,15 +174,17 @@ func (db *DB) hExpireAt(key []byte, when int64) (int64, error) { if hlen, err := db.HLen(key); err != nil || hlen == 0 { return 0, err - } else { - db.expireAt(t, HashType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } } + + db.expireAt(t, HashType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + return 1, nil } +// HLen returns the length of hash. func (db *DB) HLen(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -190,6 +193,7 @@ func (db *DB) HLen(key []byte) (int64, error) { return Int64(db.bucket.Get(db.hEncodeSizeKey(key))) } +// HSet sets the field with value of key. func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) { if err := checkHashKFSize(key, field); err != nil { return 0, err @@ -210,6 +214,7 @@ func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) { return n, err } +// HGet gets the value of the field. func (db *DB) HGet(key []byte, field []byte) ([]byte, error) { if err := checkHashKFSize(key, field); err != nil { return nil, err @@ -218,6 +223,7 @@ func (db *DB) HGet(key []byte, field []byte) ([]byte, error) { return db.bucket.Get(db.hEncodeHashKey(key, field)) } +// HMset sets multi field-values. func (db *DB) HMset(key []byte, args ...FVPair) error { t := db.hashBatch t.Lock() @@ -225,7 +231,7 @@ func (db *DB) HMset(key []byte, args ...FVPair) error { var err error var ek []byte - var num int64 = 0 + var num int64 for i := 0; i < len(args); i++ { if err := checkHashKFSize(key, args[i].Field); err != nil { return err @@ -253,6 +259,7 @@ func (db *DB) HMset(key []byte, args ...FVPair) error { return err } +// HMget gets multi values of fields func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) { var ek []byte @@ -273,6 +280,7 @@ func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) { return r, nil } +// HDel deletes the fields. 
func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) { t := db.hashBatch @@ -286,7 +294,7 @@ func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) { it := db.bucket.NewIterator() defer it.Close() - var num int64 = 0 + var num int64 for i := 0; i < len(args); i++ { if err := checkHashKFSize(key, args[i]); err != nil { return 0, err @@ -317,23 +325,24 @@ func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) { sk := db.hEncodeSizeKey(key) var err error - var size int64 = 0 + var size int64 if size, err = Int64(db.bucket.Get(sk)); err != nil { return 0, err + } + + size += delta + if size <= 0 { + size = 0 + t.Delete(sk) + db.rmExpire(t, HashType, key) } else { - size += delta - if size <= 0 { - size = 0 - t.Delete(sk) - db.rmExpire(t, HashType, key) - } else { - t.Put(sk, PutInt64(size)) - } + t.Put(sk, PutInt64(size)) } return size, nil } +// HIncrBy increases the value of field by delta. func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { if err := checkHashKFSize(key, field); err != nil { return 0, err @@ -348,7 +357,7 @@ func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { ek = db.hEncodeHashKey(key, field) - var n int64 = 0 + var n int64 if n, err = StrInt64(db.bucket.Get(ek)); err != nil { return 0, err } @@ -365,6 +374,7 @@ func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { return n, err } +// HGetAll returns all field-values. func (db *DB) HGetAll(key []byte) ([]FVPair, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -390,6 +400,7 @@ func (db *DB) HGetAll(key []byte) ([]FVPair, error) { return v, nil } +// HKeys returns the all fields. func (db *DB) HKeys(key []byte) ([][]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -414,6 +425,7 @@ func (db *DB) HKeys(key []byte) ([][]byte, error) { return v, nil } +// HValues returns all values func (db *DB) HValues(key []byte) ([][]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -439,6 +451,7 @@ func (db *DB) HValues(key []byte) ([][]byte, error) { return v, nil } +// HClear clears the data. func (db *DB) HClear(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -455,6 +468,7 @@ func (db *DB) HClear(key []byte) (int64, error) { return num, err } +// HMclear cleans multi data. func (db *DB) HMclear(keys ...[]byte) (int64, error) { t := db.hashBatch t.Lock() @@ -482,6 +496,7 @@ func (db *DB) hFlush() (drop int64, err error) { return db.flushType(t, HashType) } +// HExpire expires the data with duration. func (db *DB) HExpire(key []byte, duration int64) (int64, error) { if duration <= 0 { return 0, errExpireValue @@ -490,6 +505,7 @@ func (db *DB) HExpire(key []byte, duration int64) (int64, error) { return db.hExpireAt(key, time.Now().Unix()+duration) } +// HExpireAt expires the data at time when. func (db *DB) HExpireAt(key []byte, when int64) (int64, error) { if when <= time.Now().Unix() { return 0, errExpireValue @@ -498,6 +514,7 @@ func (db *DB) HExpireAt(key []byte, when int64) (int64, error) { return db.hExpireAt(key, when) } +// HTTL gets the TTL of data. func (db *DB) HTTL(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return -1, err @@ -506,6 +523,7 @@ func (db *DB) HTTL(key []byte) (int64, error) { return db.ttl(HashType, key) } +// HPersist removes the TTL of data. 
func (db *DB) HPersist(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -524,6 +542,7 @@ func (db *DB) HPersist(key []byte) (int64, error) { return n, err } +// HKeyExists checks whether data exists or not. func (db *DB) HKeyExists(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err diff --git a/ledis/t_kv.go b/ledis/t_kv.go index 624287f..9abec2a 100644 --- a/ledis/t_kv.go +++ b/ledis/t_kv.go @@ -11,6 +11,7 @@ import ( "github.com/siddontang/ledisdb/store" ) +// KVPair is the pair of key-value. type KVPair struct { Key []byte Value []byte @@ -109,23 +110,27 @@ func (db *DB) setExpireAt(key []byte, when int64) (int64, error) { if exist, err := db.Exists(key); err != nil || exist == 0 { return 0, err - } else { - db.expireAt(t, KVType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } } + + db.expireAt(t, KVType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + return 1, nil } +// Decr decreases the data. func (db *DB) Decr(key []byte) (int64, error) { return db.incr(key, -1) } +// DecrBy decreases the data by decrement. func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) { return db.incr(key, -decrement) } +// Del deletes the data. func (db *DB) Del(keys ...[]byte) (int64, error) { if len(keys) == 0 { return 0, nil @@ -149,6 +154,7 @@ func (db *DB) Del(keys ...[]byte) (int64, error) { return int64(len(keys)), err } +// Exists check data exists or not. func (db *DB) Exists(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -166,6 +172,7 @@ func (db *DB) Exists(key []byte) (int64, error) { return 0, err } +// Get gets the value. func (db *DB) Get(key []byte) ([]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -176,6 +183,7 @@ func (db *DB) Get(key []byte) ([]byte, error) { return db.bucket.Get(key) } +// GetSlice gets the slice of the data. func (db *DB) GetSlice(key []byte) (store.Slice, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -186,6 +194,7 @@ func (db *DB) GetSlice(key []byte) (store.Slice, error) { return db.bucket.GetSlice(key) } +// GetSet gets the value and sets new value. func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -212,14 +221,17 @@ func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) { return oldValue, err } +// Incr increases the data. func (db *DB) Incr(key []byte) (int64, error) { return db.incr(key, 1) } +// IncrBy increases the data by increment. func (db *DB) IncrBy(key []byte, increment int64) (int64, error) { return db.incr(key, increment) } +// MGet gets multi data. func (db *DB) MGet(keys ...[]byte) ([][]byte, error) { values := make([][]byte, len(keys)) @@ -237,6 +249,7 @@ func (db *DB) MGet(keys ...[]byte) ([][]byte, error) { return values, nil } +// MSet sets multi data. func (db *DB) MSet(args ...KVPair) error { if len(args) == 0 { return nil @@ -270,6 +283,7 @@ func (db *DB) MSet(args ...KVPair) error { return err } +// Set sets the data. func (db *DB) Set(key []byte, value []byte) error { if err := checkKeySize(key); err != nil { return err @@ -292,6 +306,7 @@ func (db *DB) Set(key []byte, value []byte) error { return err } +// SetNX sets the data if not existed. 
func (db *DB) SetNX(key []byte, value []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -322,6 +337,7 @@ func (db *DB) SetNX(key []byte, value []byte) (int64, error) { return n, err } +// SetEX sets the data with a TTL. func (db *DB) SetEX(key []byte, duration int64, value []byte) error { if err := checkKeySize(key); err != nil { return err @@ -341,11 +357,7 @@ func (db *DB) SetEX(key []byte, duration int64, value []byte) error { t.Put(ek, value) db.expireAt(t, KVType, key, time.Now().Unix()+duration) - if err := t.Commit(); err != nil { - return err - } - - return nil + return t.Commit() } func (db *DB) flush() (drop int64, err error) { @@ -355,6 +367,7 @@ func (db *DB) flush() (drop int64, err error) { return db.flushType(t, KVType) } +// Expire expires the data. func (db *DB) Expire(key []byte, duration int64) (int64, error) { if duration <= 0 { return 0, errExpireValue @@ -363,6 +376,7 @@ func (db *DB) Expire(key []byte, duration int64) (int64, error) { return db.setExpireAt(key, time.Now().Unix()+duration) } +// ExpireAt expires the data at when. func (db *DB) ExpireAt(key []byte, when int64) (int64, error) { if when <= time.Now().Unix() { return 0, errExpireValue @@ -371,6 +385,7 @@ func (db *DB) ExpireAt(key []byte, when int64) (int64, error) { return db.setExpireAt(key, when) } +// TTL returns the TTL of the data. func (db *DB) TTL(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return -1, err @@ -379,6 +394,7 @@ func (db *DB) TTL(key []byte) (int64, error) { return db.ttl(KVType, key) } +// Persist removes the TTL of the data. func (db *DB) Persist(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -396,6 +412,7 @@ func (db *DB) Persist(key []byte) (int64, error) { return n, err } +// SetRange sets the data with new value from offset. func (db *DB) SetRange(key []byte, offset int, value []byte) (int64, error) { if len(value) == 0 { return 0, nil @@ -458,6 +475,7 @@ func getRange(start int, end int, valLen int) (int, int) { return start, end } +// GetRange gets the range of the data. func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -480,6 +498,7 @@ func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) { return value[start : end+1], nil } +// StrLen returns the length of the data. func (db *DB) StrLen(key []byte) (int64, error) { s, err := db.GetSlice(key) if err != nil { @@ -491,6 +510,7 @@ func (db *DB) StrLen(key []byte) (int64, error) { return int64(n), nil } +// Append appends the value to the data. func (db *DB) Append(key []byte, value []byte) (int64, error) { if len(value) == 0 { return 0, nil @@ -526,6 +546,7 @@ func (db *DB) Append(key []byte, value []byte) (int64, error) { return int64(len(oldValue)), nil } +// BitOP does the bit operations in data. func (db *DB) BitOP(op string, destKey []byte, srcKeys ...[]byte) (int64, error) { if err := checkKeySize(destKey); err != nil { return 0, err @@ -628,6 +649,7 @@ func numberBitCount(i uint32) uint32 { return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24 } +// BitCount returns the bit count of data. 
func (db *DB) BitCount(key []byte, start int, end int) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -642,7 +664,7 @@ func (db *DB) BitCount(key []byte, start int, end int) (int64, error) { start, end = getRange(start, end, len(value)) value = value[start : end+1] - var n int64 = 0 + var n int64 pos := 0 for ; pos+4 <= len(value); pos = pos + 4 { @@ -656,6 +678,7 @@ func (db *DB) BitCount(key []byte, start int, end int) (int64, error) { return n, nil } +// BitPos returns the pos of the data. func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -665,7 +688,7 @@ func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) { return 0, fmt.Errorf("bit must be 0 or 1, not %d", on) } - var skipValue uint8 = 0 + var skipValue uint8 if on == 0 { skipValue = 0xFF } @@ -694,6 +717,7 @@ func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) { return -1, nil } +// SetBit sets the bit to the data. func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -736,11 +760,12 @@ func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) { if bitVal > 0 { return 1, nil - } else { - return 0, nil } + + return 0, nil } +// GetBit gets the bit of data at offset. func (db *DB) GetBit(key []byte, offset int) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -763,7 +788,7 @@ func (db *DB) GetBit(key []byte, offset int) (int64, error) { bitVal := value[byteOffset] & (1 << bit) if bitVal > 0 { return 1, nil - } else { - return 0, nil } + + return 0, nil } diff --git a/ledis/t_list.go b/ledis/t_list.go index 83ed56e..c177cd9 100644 --- a/ledis/t_list.go +++ b/ledis/t_list.go @@ -121,12 +121,12 @@ func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) { return 0, err } - var pushCnt int = len(args) + pushCnt := len(args) if pushCnt == 0 { return int64(size), nil } - var seq int32 = headSeq + seq := headSeq var delta int32 = -1 if whereSeq == listTailSeq { seq = tailSeq @@ -190,7 +190,7 @@ func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) { var value []byte - var seq int32 = headSeq + seq := headSeq if whereSeq == listTailSeq { seq = tailSeq } @@ -202,9 +202,9 @@ func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) { } if whereSeq == listHeadSeq { - headSeq += 1 + headSeq++ } else { - tailSeq -= 1 + tailSeq-- } t.Delete(itemKey) @@ -234,25 +234,25 @@ func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) { ek := db.lEncodeMetaKey(key) if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil { return err - } else { - if start < 0 { - start = llen + start - } - if stop < 0 { - stop = llen + stop - } - if start >= llen || start > stop { - db.lDelete(t, key) - db.rmExpire(t, ListType, key) - return t.Commit() - } + } - if start < 0 { - start = 0 - } - if stop >= llen { - stop = llen - 1 - } + if start < 0 { + start = llen + start + } + if stop < 0 { + stop = llen + stop + } + if start >= llen || start > stop { + db.lDelete(t, key) + db.rmExpire(t, ListType, key) + return t.Commit() + } + + if start < 0 { + start = 0 + } + if stop >= llen { + stop = llen - 1 } if start > 0 { @@ -343,11 +343,14 @@ func (db *DB) lDelete(t *batch, key []byte) int64 { return 0 } - var num int64 = 0 + var num int64 startKey := db.lEncodeListKey(key, headSeq) stopKey := db.lEncodeListKey(key, tailSeq) - rit := store.NewRangeIterator(it, 
&store.Range{startKey, stopKey, store.RangeClose}) + rit := store.NewRangeIterator(it, &store.Range{ + Min: startKey, + Max: stopKey, + Type: store.RangeClose}) for ; rit.Valid(); rit.Next() { t.Delete(rit.RawKey()) num++ @@ -383,7 +386,7 @@ func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq in func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 { t := db.listBatch - var size int32 = tailSeq - headSeq + 1 + size := tailSeq - headSeq + 1 if size < 0 { // todo : log error + panic log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq) @@ -408,15 +411,17 @@ func (db *DB) lExpireAt(key []byte, when int64) (int64, error) { if llen, err := db.LLen(key); err != nil || llen == 0 { return 0, err - } else { - db.expireAt(t, ListType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } } + + db.expireAt(t, ListType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + return 1, nil } +// LIndex returns the value at index. func (db *DB) LIndex(key []byte, index int32) ([]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -449,6 +454,7 @@ func (db *DB) LIndex(key []byte, index int32) ([]byte, error) { return v, nil } +// LLen gets the length of the list. func (db *DB) LLen(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -459,25 +465,32 @@ func (db *DB) LLen(key []byte) (int64, error) { return int64(size), err } +// LPop pops the value. func (db *DB) LPop(key []byte) ([]byte, error) { return db.lpop(key, listHeadSeq) } +// LTrim trims the value from start to stop. func (db *DB) LTrim(key []byte, start, stop int64) error { return db.ltrim2(key, start, stop) } +// LTrimFront trims the value from top. func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) { return db.ltrim(key, trimSize, listHeadSeq) } +// LTrimBack trims the value from back. func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) { return db.ltrim(key, trimSize, listTailSeq) } +// LPush pushes the value to the list. func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) { return db.lpush(key, listHeadSeq, args...) } + +// LSet sets the value at index. func (db *DB) LSet(key []byte, index int32, value []byte) error { if err := checkKeySize(key); err != nil { return err @@ -512,6 +525,7 @@ func (db *DB) LSet(key []byte, index int32, value []byte) error { return err } +// LRange gets the value of list at range. func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -570,14 +584,17 @@ func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) { return v, nil } +// RPop rpops the value. func (db *DB) RPop(key []byte) ([]byte, error) { return db.lpop(key, listTailSeq) } +// RPush rpushes the value. func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) { return db.lpush(key, listTailSeq, args...) } +// LClear clears the list. func (db *DB) LClear(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -594,6 +611,7 @@ func (db *DB) LClear(key []byte) (int64, error) { return num, err } +// LMclear clears multi lists. func (db *DB) LMclear(keys ...[]byte) (int64, error) { t := db.listBatch t.Lock() @@ -620,6 +638,7 @@ func (db *DB) lFlush() (drop int64, err error) { return db.flushType(t, ListType) } +// LExpire expires the list. 
func (db *DB) LExpire(key []byte, duration int64) (int64, error) { if duration <= 0 { return 0, errExpireValue @@ -628,6 +647,7 @@ func (db *DB) LExpire(key []byte, duration int64) (int64, error) { return db.lExpireAt(key, time.Now().Unix()+duration) } +// LExpireAt expires the list at when. func (db *DB) LExpireAt(key []byte, when int64) (int64, error) { if when <= time.Now().Unix() { return 0, errExpireValue @@ -636,6 +656,7 @@ func (db *DB) LExpireAt(key []byte, when int64) (int64, error) { return db.lExpireAt(key, when) } +// LTTL gets the TTL of list. func (db *DB) LTTL(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return -1, err @@ -644,6 +665,7 @@ func (db *DB) LTTL(key []byte) (int64, error) { return db.ttl(ListType, key) } +// LPersist removes the TTL of list. func (db *DB) LPersist(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -672,14 +694,17 @@ func (db *DB) lEncodeMaxKey() []byte { return ek } +// BLPop pops the list with block way. func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { return db.lblockPop(keys, listHeadSeq, timeout) } +// BRPop bpops the list with block way. func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { return db.lblockPop(keys, listTailSeq, timeout) } +// LKeyExists check list existed or not. func (db *DB) LKeyExists(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err diff --git a/ledis/t_set.go b/ledis/t_set.go index a4eaf95..926d4ea 100644 --- a/ledis/t_set.go +++ b/ledis/t_set.go @@ -12,6 +12,7 @@ import ( var errSetKey = errors.New("invalid set key") var errSSizeKey = errors.New("invalid ssize key") +// For set operation type. const ( setStartSep byte = ':' setStopSep byte = setStartSep + 1 @@ -137,7 +138,7 @@ func (db *DB) sDelete(t *batch, key []byte) int64 { start := db.sEncodeStartKey(key) stop := db.sEncodeStopKey(key) - var num int64 = 0 + var num int64 it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) for ; it.Valid(); it.Next() { t.Delete(it.RawKey()) @@ -155,18 +156,18 @@ func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) { sk := db.sEncodeSizeKey(key) var err error - var size int64 = 0 + var size int64 if size, err = Int64(db.bucket.Get(sk)); err != nil { return 0, err + } + + size += delta + if size <= 0 { + size = 0 + t.Delete(sk) + db.rmExpire(t, SetType, key) } else { - size += delta - if size <= 0 { - size = 0 - t.Delete(sk) - db.rmExpire(t, SetType, key) - } else { - t.Put(sk, PutInt64(size)) - } + t.Put(sk, PutInt64(size)) } return size, nil @@ -179,12 +180,10 @@ func (db *DB) sExpireAt(key []byte, when int64) (int64, error) { if scnt, err := db.SCard(key); err != nil || scnt == 0 { return 0, err - } else { - db.expireAt(t, SetType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - + } + db.expireAt(t, SetType, key, when) + if err := t.Commit(); err != nil { + return 0, err } return 1, nil @@ -207,6 +206,7 @@ func (db *DB) sSetItem(key []byte, member []byte) (int64, error) { return n, nil } +// SAdd adds the value to the set. 
func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { t := db.setBatch t.Lock() @@ -214,7 +214,7 @@ func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { var err error var ek []byte - var num int64 = 0 + var num int64 for i := 0; i < len(args); i++ { if err := checkSetKMSize(key, args[i]); err != nil { return 0, err @@ -240,6 +240,7 @@ func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { } +// SCard gets the size of set. func (db *DB) SCard(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -294,16 +295,19 @@ func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { return slice, nil } +// SDiff gets the difference of sets. func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) { v, err := db.sDiffGeneric(keys...) return v, err } +// SDiffStore gets the difference of sets and stores to dest set. func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) { n, err := db.sStoreGeneric(dstKey, DiffType, keys...) return n, err } +// SKeyExists checks whether the set exists or not. func (db *DB) SKeyExists(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -370,17 +374,20 @@ func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { } +// SInter intersects the sets. func (db *DB) SInter(keys ...[]byte) ([][]byte, error) { v, err := db.sInterGeneric(keys...) return v, err } +// SInterStore intersects the sets and stores to dest set. func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) { n, err := db.sStoreGeneric(dstKey, InterType, keys...) return n, err } +// SIsMember checks member in set. func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { ek := db.sEncodeSetKey(key, member) @@ -393,6 +400,7 @@ func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { return n, nil } +// SMembers gets members of set. func (db *DB) SMembers(key []byte) ([][]byte, error) { if err := checkKeySize(key); err != nil { return nil, err @@ -418,6 +426,7 @@ func (db *DB) SMembers(key []byte) ([][]byte, error) { return v, nil } +// SRem removes the members of set. func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { t := db.setBatch t.Lock() @@ -430,7 +439,7 @@ func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { it := db.bucket.NewIterator() defer it.Close() - var num int64 = 0 + var num int64 for i := 0; i < len(args); i++ { if err := checkSetKMSize(key, args[i]); err != nil { return 0, err @@ -487,11 +496,13 @@ func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) { return slice, nil } +// SUnion unions the sets. func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) { v, err := db.sUnionGeneric(keys...) return v, err } +// SUnionStore unions the sets and stores to the dest set. func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) { n, err := db.sStoreGeneric(dstKey, UnionType, keys...) return n, err @@ -549,6 +560,7 @@ func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, return n, nil } +// SClear clears the set. func (db *DB) SClear(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -565,6 +577,7 @@ func (db *DB) SClear(key []byte) (int64, error) { return num, err } +// SMclear clears multi sets. func (db *DB) SMclear(keys ...[]byte) (int64, error) { t := db.setBatch t.Lock() @@ -583,6 +596,7 @@ func (db *DB) SMclear(keys ...[]byte) (int64, error) { return int64(len(keys)), err } +// SExpire expires the set. 
func (db *DB) SExpire(key []byte, duration int64) (int64, error) { if duration <= 0 { return 0, errExpireValue @@ -592,6 +606,7 @@ func (db *DB) SExpire(key []byte, duration int64) (int64, error) { } +// SExpireAt expires the set at when. func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { if when <= time.Now().Unix() { return 0, errExpireValue @@ -601,6 +616,7 @@ func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { } +// STTL gets the TTL of set. func (db *DB) STTL(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return -1, err @@ -609,6 +625,7 @@ func (db *DB) STTL(key []byte) (int64, error) { return db.ttl(SetType, key) } +// SPersist removes the TTL of set. func (db *DB) SPersist(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err diff --git a/ledis/t_ttl.go b/ledis/t_ttl.go index 2c979ae..126c8f6 100644 --- a/ledis/t_ttl.go +++ b/ledis/t_ttl.go @@ -119,18 +119,22 @@ func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) { func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) { mk := db.expEncodeMetaKey(dataType, key) - if v, err := db.bucket.Get(mk); err != nil { + v, err := db.bucket.Get(mk) + if err != nil { return 0, err } else if v == nil { return 0, nil - } else if when, err2 := Int64(v, nil); err2 != nil { - return 0, err2 - } else { - tk := db.expEncodeTimeKey(dataType, key, when) - t.Delete(mk) - t.Delete(tk) - return 1, nil } + + when, err2 := Int64(v, nil) + if err2 != nil { + return 0, err2 + } + + tk := db.expEncodeTimeKey(dataType, key, when) + t.Delete(mk) + t.Delete(tk) + return 1, nil } func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) { diff --git a/ledis/t_ttl_test.go b/ledis/t_ttl_test.go index 1c54a29..dd555ad 100644 --- a/ledis/t_ttl_test.go +++ b/ledis/t_ttl_test.go @@ -55,19 +55,19 @@ func listAdaptor(db *DB) *adaptor { eles = append(eles, e) } - if n, err := db.LPush(k, eles...); err != nil { + n, err := db.LPush(k, eles...) + if err != nil { return 0, err - } else { - return n, nil } + + return n, nil } adp.exists = func(k []byte) (int64, error) { if llen, err := db.LLen(k); err != nil || llen <= 0 { return 0, err - } else { - return 1, nil } + return 1, nil } adp.del = db.LClear @@ -97,17 +97,15 @@ func hashAdaptor(db *DB) *adaptor { if err := db.HMset(k, datas...); err != nil { return 0, err - } else { - return int64(len(datas)), nil } + return int64(len(datas)), nil } adp.exists = func(k []byte) (int64, error) { if hlen, err := db.HLen(k); err != nil || hlen <= 0 { return 0, err - } else { - return 1, nil } + return 1, nil } adp.del = db.HClear @@ -135,19 +133,18 @@ func zsetAdaptor(db *DB) *adaptor { datas = append(datas, pair) } - if n, err := db.ZAdd(k, datas...); err != nil { + n, err := db.ZAdd(k, datas...) + if err != nil { return 0, err - } else { - return n, nil } + return n, nil } adp.exists = func(k []byte) (int64, error) { if cnt, err := db.ZCard(k); err != nil || cnt <= 0 { return 0, err - } else { - return 1, nil } + return 1, nil } adp.del = db.ZClear @@ -171,20 +168,19 @@ func setAdaptor(db *DB) *adaptor { eles = append(eles, e) } - if n, err := db.SAdd(k, eles...); err != nil { + n, err := db.SAdd(k, eles...) 
+ if err != nil { return 0, err - } else { - return n, nil } - + return n, nil } adp.exists = func(k []byte) (int64, error) { if slen, err := db.SCard(k); err != nil || slen <= 0 { return 0, err - } else { - return 1, nil } + + return 1, nil } adp.del = db.SClear diff --git a/ledis/t_zset.go b/ledis/t_zset.go index fe11df8..030b297 100644 --- a/ledis/t_zset.go +++ b/ledis/t_zset.go @@ -10,6 +10,7 @@ import ( "github.com/siddontang/ledisdb/store" ) +// For zset const. const ( MinScore int64 = -1<<63 + 1 MaxScore int64 = 1<<63 - 1 @@ -20,6 +21,7 @@ const ( AggregateMax byte = 2 ) +// ScorePair is the pair of score and member. type ScorePair struct { Score int64 Member []byte @@ -238,7 +240,7 @@ func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64, return 0, errScoreOverflow } - var exists int64 = 0 + var exists int64 ek := db.zEncodeSetKey(key, member) if v, err := db.bucket.Get(ek); err != nil { @@ -246,12 +248,13 @@ func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64, } else if v != nil { exists = 1 - if s, err := Int64(v, err); err != nil { + s, err := Int64(v, err) + if err != nil { return 0, err - } else { - sk := db.zEncodeScoreKey(key, member, s) - t.Delete(sk) } + + sk := db.zEncodeScoreKey(key, member, s) + t.Delete(sk) } t.Put(ek, PutInt64(score)) @@ -273,12 +276,12 @@ func (db *DB) zDelItem(t *batch, key []byte, member []byte, skipDelScore bool) ( //exists if !skipDelScore { //we must del score - if s, err := Int64(v, err); err != nil { + s, err := Int64(v, err) + if err != nil { return 0, err - } else { - sk := db.zEncodeScoreKey(key, member, s) - t.Delete(sk) } + sk := db.zEncodeScoreKey(key, member, s) + t.Delete(sk) } } @@ -300,15 +303,17 @@ func (db *DB) zExpireAt(key []byte, when int64) (int64, error) { if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 { return 0, err - } else { - db.expireAt(t, ZSetType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } } + + db.expireAt(t, ZSetType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + return 1, nil } +// ZAdd add the members. func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { if len(args) == 0 { return 0, nil @@ -318,7 +323,7 @@ func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { t.Lock() defer t.Unlock() - var num int64 = 0 + var num int64 for i := 0; i < len(args); i++ { score := args[i].Score member := args[i].Member @@ -349,20 +354,20 @@ func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) { size, err := Int64(db.bucket.Get(sk)) if err != nil { return 0, err + } + size += delta + if size <= 0 { + size = 0 + t.Delete(sk) + db.rmExpire(t, ZSetType, key) } else { - size += delta - if size <= 0 { - size = 0 - t.Delete(sk) - db.rmExpire(t, ZSetType, key) - } else { - t.Put(sk, PutInt64(size)) - } + t.Put(sk, PutInt64(size)) } return size, nil } +// ZCard gets the size of the zset. func (db *DB) ZCard(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -372,12 +377,13 @@ func (db *DB) ZCard(key []byte) (int64, error) { return Int64(db.bucket.Get(sk)) } +// ZScore gets the score of member. 
func (db *DB) ZScore(key []byte, member []byte) (int64, error) { if err := checkZSetKMSize(key, member); err != nil { return InvalidScore, err } - var score int64 = InvalidScore + score := InvalidScore k := db.zEncodeSetKey(key, member) if v, err := db.bucket.Get(k); err != nil { @@ -393,6 +399,7 @@ func (db *DB) ZScore(key []byte, member []byte) (int64, error) { return score, nil } +// ZRem removes members func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { if len(members) == 0 { return 0, nil @@ -402,7 +409,7 @@ func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { t.Lock() defer t.Unlock() - var num int64 = 0 + var num int64 for i := 0; i < len(members); i++ { if err := checkZSetKMSize(key, members[i]); err != nil { return 0, err @@ -423,6 +430,7 @@ func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { return num, err } +// ZIncrBy increases the score of member with delta. func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { if err := checkZSetKMSize(key, member); err != nil { return InvalidScore, err @@ -434,7 +442,7 @@ func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { ek := db.zEncodeSetKey(key, member) - var oldScore int64 = 0 + var oldScore int64 v, err := db.bucket.Get(ek) if err != nil { return InvalidScore, err @@ -465,6 +473,7 @@ func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { return newScore, err } +// ZCount gets the number of score in [min, max] func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -475,7 +484,7 @@ func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) { rangeType := store.RangeROpen it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1) - var n int64 = 0 + var n int64 for ; it.Valid(); it.Next() { n++ } @@ -494,39 +503,40 @@ func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) { it := db.bucket.NewIterator() defer it.Close() - if v := it.Find(k); v == nil { + v := it.Find(k) + if v == nil { return -1, nil + } + + s, err := Int64(v, nil) + if err != nil { + return 0, err + } + var rit *store.RangeLimitIterator + + sk := db.zEncodeScoreKey(key, member, s) + + if !reverse { + minKey := db.zEncodeStartScoreKey(key, MinScore) + + rit = store.NewRangeIterator(it, &store.Range{Min: minKey, Max: sk, Type: store.RangeClose}) } else { - if s, err := Int64(v, nil); err != nil { - return 0, err - } else { - var rit *store.RangeLimitIterator + maxKey := db.zEncodeStopScoreKey(key, MaxScore) + rit = store.NewRevRangeIterator(it, &store.Range{Min: sk, Max: maxKey, Type: store.RangeClose}) + } - sk := db.zEncodeScoreKey(key, member, s) + var lastKey []byte + var n int64 - if !reverse { - minKey := db.zEncodeStartScoreKey(key, MinScore) + for ; rit.Valid(); rit.Next() { + n++ - rit = store.NewRangeIterator(it, &store.Range{minKey, sk, store.RangeClose}) - } else { - maxKey := db.zEncodeStopScoreKey(key, MaxScore) - rit = store.NewRevRangeIterator(it, &store.Range{sk, maxKey, store.RangeClose}) - } + lastKey = rit.BufKey(lastKey) + } - var lastKey []byte = nil - var n int64 = 0 - - for ; rit.Valid(); rit.Next() { - n++ - - lastKey = rit.BufKey(lastKey) - } - - if _, m, _, err := db.zDecodeScoreKey(lastKey); err == nil && bytes.Equal(m, member) { - n-- - return n, nil - } - } + if _, m, _, err := db.zDecodeScoreKey(lastKey); err == nil && bytes.Equal(m, member) { + n-- + return n, nil } return -1, nil @@ -538,9 +548,8 @@ func 
(db *DB) zIterator(key []byte, min int64, max int64, offset int, count int, if !reverse { return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) - } else { - return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) } + return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) } func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) { @@ -549,7 +558,7 @@ func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, } it := db.zIterator(key, min, max, offset, count, false) - var num int64 = 0 + var num int64 for ; it.Valid(); it.Next() { sk := it.RawKey() _, m, _, err := db.zDecodeScoreKey(sk) @@ -659,6 +668,7 @@ func (db *DB) zParseLimit(key []byte, start int, stop int) (offset int, count in return } +// ZClear clears the zset. func (db *DB) ZClear(key []byte) (int64, error) { t := db.zsetBatch t.Lock() @@ -672,6 +682,7 @@ func (db *DB) ZClear(key []byte) (int64, error) { return rmCnt, err } +// ZMclear clears multi zsets. func (db *DB) ZMclear(keys ...[]byte) (int64, error) { t := db.zsetBatch t.Lock() @@ -688,21 +699,25 @@ func (db *DB) ZMclear(keys ...[]byte) (int64, error) { return int64(len(keys)), err } +// ZRange gets the members from start to stop. func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) { return db.ZRangeGeneric(key, start, stop, false) } -//min and max must be inclusive -//if no limit, set offset = 0 and count = -1 +// ZRangeByScore gets the data with score in min and max. +// min and max must be inclusive +// if no limit, set offset = 0 and count = -1 func (db *DB) ZRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) { return db.ZRangeByScoreGeneric(key, min, max, offset, count, false) } +// ZRank gets the rank of member. func (db *DB) ZRank(key []byte, member []byte) (int64, error) { return db.zrank(key, member, false) } +// ZRemRangeByRank removes the member at range from start to stop. func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) { offset, count, err := db.zParseLimit(key, start, stop) if err != nil { @@ -723,7 +738,7 @@ func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) { return rmCnt, err } -//min and max must be inclusive +// ZRemRangeByScore removes the data with score at [min, max] func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) { t := db.zsetBatch t.Lock() @@ -737,20 +752,24 @@ func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) return rmCnt, err } +// ZRevRange gets the data reversed. func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) { return db.ZRangeGeneric(key, start, stop, true) } +// ZRevRank gets the rank of member reversed. func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) { return db.zrank(key, member, true) } -//min and max must be inclusive -//if no limit, set offset = 0 and count = -1 +// ZRevRangeByScore gets the data with score at [min, max] +// min and max must be inclusive +// if no limit, set offset = 0 and count = -1 func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) { return db.ZRangeByScoreGeneric(key, min, max, offset, count, true) } +// ZRangeGeneric is a generic function for scan zset. 
func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) { offset, count, err := db.zParseLimit(key, start, stop) if err != nil { @@ -760,8 +779,9 @@ func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]Sc return db.zRange(key, MinScore, MaxScore, offset, count, reverse) } -//min and max must be inclusive -//if no limit, set offset = 0 and count = -1 +// ZRangeByScoreGeneric is a generic function to scan zset with score. +// min and max must be inclusive +// if no limit, set offset = 0 and count = -1 func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64, offset int, count int, reverse bool) ([]ScorePair, error) { @@ -775,6 +795,7 @@ func (db *DB) zFlush() (drop int64, err error) { return db.flushType(t, ZSetType) } +// ZExpire expires the zset. func (db *DB) ZExpire(key []byte, duration int64) (int64, error) { if duration <= 0 { return 0, errExpireValue @@ -783,6 +804,7 @@ func (db *DB) ZExpire(key []byte, duration int64) (int64, error) { return db.zExpireAt(key, time.Now().Unix()+duration) } +// ZExpireAt expires the zset at when. func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) { if when <= time.Now().Unix() { return 0, errExpireValue @@ -791,6 +813,7 @@ func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) { return db.zExpireAt(key, when) } +// ZTTL gets the TTL of zset. func (db *DB) ZTTL(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return -1, err @@ -799,6 +822,7 @@ func (db *DB) ZTTL(key []byte) (int64, error) { return db.ttl(ZSetType, key) } +// ZPersist removes the TTL of zset. func (db *DB) ZPersist(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err @@ -841,6 +865,7 @@ func getAggregateFunc(aggregate byte) func(int64, int64) int64 { return nil } +// ZUnionStore unions the zsets and stores to dest zset. func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { var destMap = map[string]int64{} @@ -902,6 +927,7 @@ func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, agg return n, nil } +// ZInterStore intersects the zsets and stores to dest zset. 
func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { aggregateFunc := getAggregateFunc(aggregate) @@ -960,7 +986,7 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg } } - var n int64 = int64(len(destMap)) + n := int64(len(destMap)) sk := db.zEncodeSizeKey(destKey) t.Put(sk, PutInt64(n)) @@ -970,6 +996,7 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg return n, nil } +// ZRangeByLex scans the zset lexicographically func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) { if min == nil { min = db.zEncodeStartSetKey(key) @@ -995,6 +1022,7 @@ func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, o return ay, nil } +// ZRemRangeByLex removes members in [min, max] lexicographically func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { if min == nil { min = db.zEncodeStartSetKey(key) @@ -1014,7 +1042,7 @@ func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8 it := db.bucket.RangeIterator(min, max, rangeType) defer it.Close() - var n int64 = 0 + var n int64 for ; it.Valid(); it.Next() { t.Delete(it.RawKey()) n++ @@ -1027,6 +1055,7 @@ func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8 return n, nil } +// ZLexCount gets the count of zset members lexicographically. func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { if min == nil { min = db.zEncodeStartSetKey(key) @@ -1042,7 +1071,7 @@ func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (in it := db.bucket.RangeIterator(min, max, rangeType) defer it.Close() - var n int64 = 0 + var n int64 for ; it.Valid(); it.Next() { n++ } @@ -1050,6 +1079,7 @@ func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (in return n, nil } +// ZKeyExists checks whether the zset exists. func (db *DB) ZKeyExists(key []byte) (int64, error) { if err := checkKeySize(key); err != nil { return 0, err diff --git a/ledis/t_zset_test.go b/ledis/t_zset_test.go index cc0c2b6..b7513a5 100644 --- a/ledis/t_zset_test.go +++ b/ledis/t_zset_test.go @@ -155,7 +155,7 @@ func TestZSetOrder(t *testing.T) { } else { for i := 0; i < membCnt; i++ { if string(qMembs[i].Member) != membs[i] { - t.Fatal(fmt.Sprintf("[%s] vs [%s]", qMembs[i], membs[i])) + t.Fatalf("[%v] vs [%v]", qMembs[i], membs[i]) } } } diff --git a/ledis/util.go b/ledis/util.go index 26ee6d0..5a054c4 100644 --- a/ledis/util.go +++ b/ledis/util.go @@ -15,6 +15,7 @@ var errIntNumber = errors.New("invalid integer") Maybe I was foolish at that time. */ +// Int64 gets a 64-bit integer in little-endian format. func Int64(v []byte, err error) (int64, error) { if err != nil { return 0, err @@ -27,6 +28,7 @@ func Int64(v []byte, err error) (int64, error) { return int64(binary.LittleEndian.Uint64(v)), nil } +// Uint64 gets an unsigned 64-bit integer in little-endian format. func Uint64(v []byte, err error) (uint64, error) { if err != nil { return 0, err @@ -39,12 +41,14 @@ func Uint64(v []byte, err error) (uint64, error) { return binary.LittleEndian.Uint64(v), nil } +// PutInt64 encodes the 64-bit integer into little-endian bytes. func PutInt64(v int64) []byte { b := make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(v)) return b } +// StrInt64 parses a 64-bit integer from its string form.
func StrInt64(v []byte, err error) (int64, error) { if err != nil { return 0, err @@ -55,6 +59,7 @@ func StrInt64(v []byte, err error) (int64, error) { } } +// StrUint64 parses an unsigned 64-bit integer from its string form. func StrUint64(v []byte, err error) (uint64, error) { if err != nil { return 0, err @@ -65,6 +70,7 @@ func StrUint64(v []byte, err error) (uint64, error) { } } +// StrInt32 parses a 32-bit integer from its string form. func StrInt32(v []byte, err error) (int32, error) { if err != nil { return 0, err @@ -76,6 +82,7 @@ func StrInt32(v []byte, err error) (int32, error) { } } +// StrInt8 parses an 8-bit integer from its string form. func StrInt8(v []byte, err error) (int8, error) { if err != nil { return 0, err @@ -87,6 +94,7 @@ func StrInt8(v []byte, err error) (int8, error) { } } +// AsyncNotify notifies the channel without blocking. func AsyncNotify(ch chan struct{}) { select { case ch <- struct{}{}: diff --git a/server/cmd_hash_test.go b/server/cmd_hash_test.go index f9d19be..17d8317 100644 --- a/server/cmd_hash_test.go +++ b/server/cmd_hash_test.go @@ -234,78 +234,78 @@ func TestHashErrorParams(t *testing.T) { defer c.Close() if _, err := c.Do("hset", "test_hset"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hget", "test_hget"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hexists", "test_hexists"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hdel", "test_hdel"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hlen", "test_hlen", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hincrby", "test_hincrby"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hmset", "test_hmset"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hmset", "test_hmset", "f1", "v1", "f2"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hmget", "test_hget"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hgetall"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hkeys"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hvals"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hclear", "test_hclear", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hmclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hexpire", "test_hexpire"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hexpireat", "test_hexpireat"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("httl"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("hpersist"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } } diff
--git a/server/cmd_kv_test.go b/server/cmd_kv_test.go index 6e8cf9d..2db0e1c 100644 --- a/server/cmd_kv_test.go +++ b/server/cmd_kv_test.go @@ -248,43 +248,43 @@ func TestKVErrorParams(t *testing.T) { } if _, err := c.Do("del"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("mset"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("mset", "a", "b", "c"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("mget"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("expire"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("expire", "a", "b"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("expireat"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("expireat", "a", "b"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("ttl"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("persist"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("setex", "a", "blah", "hello world"); err == nil { diff --git a/server/cmd_list_test.go b/server/cmd_list_test.go index 12f0b8a..616b31f 100644 --- a/server/cmd_list_test.go +++ b/server/cmd_list_test.go @@ -467,62 +467,62 @@ func TestListErrorParams(t *testing.T) { defer c.Close() if _, err := c.Do("lpush", "test_lpush"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("rpush", "test_rpush"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lpop", "test_lpop", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("rpop", "test_rpop", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("llen", "test_llen", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lindex", "test_lindex"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lrange", "test_lrange"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lmclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lexpire"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lexpireat"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lttl"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("lpersist"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("ltrim_front", "test_ltrimfront", "-1"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("ltrim_back", "test_ltrimback", "a"); err == nil { - 
t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } } diff --git a/server/cmd_set_test.go b/server/cmd_set_test.go index ed9d067..3ad9f8b 100644 --- a/server/cmd_set_test.go +++ b/server/cmd_set_test.go @@ -118,99 +118,99 @@ func TestSetErrorParams(t *testing.T) { defer c.Close() if _, err := c.Do("sadd", "test_sadd"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("scard"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("scard", "k1", "k2"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sdiff"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sdiffstore", "dstkey"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sinter"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sinterstore", "dstkey"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sunion"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sunionstore", "dstkey"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sismember", "k1"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sismember", "k1", "m1", "m2"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("smembers"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("smembers", "k1", "k2"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("srem"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("srem", "key"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sclear", "k1", "k2"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("smclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sexpire", "set_expire"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sexpire", "set_expire", "aaa"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sexpireat", "set_expireat"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sexpireat", "set_expireat", "aaa"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("sttl"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("spersist"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } } diff --git a/server/cmd_zset_test.go b/server/cmd_zset_test.go index b0f4a55..1a673ff 100644 --- a/server/cmd_zset_test.go +++ b/server/cmd_zset_test.go @@ -451,166 +451,166 @@ func TestZsetErrorParams(t *testing.T) { //zadd if _, err 
:= c.Do("zadd", "test_zadd"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zadd", "test_zadd", "a", "b", "c"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zadd", "test_zadd", "-a", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zadd", "test_zad", "0.1", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zcard if _, err := c.Do("zcard"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zscore if _, err := c.Do("zscore", "test_zscore"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrem if _, err := c.Do("zrem", "test_zrem"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zincrby if _, err := c.Do("zincrby", "test_zincrby"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zincrby", "test_zincrby", 0.1, "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zcount if _, err := c.Do("zcount", "test_zcount"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zcount", "test_zcount", "-inf", "=inf"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zcount", "test_zcount", 0.1, 0.1); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrank if _, err := c.Do("zrank", "test_zrank"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrevzrank if _, err := c.Do("zrevrank", "test_zrevrank"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zremrangebyrank if _, err := c.Do("zremrangebyrank", "test_zremrangebyrank"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zremrangebyrank", "test_zremrangebyrank", 0.1, 0.1); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zremrangebyscore if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore", "-inf", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zremrangebyscore", "test_zremrangebyscore", 0, "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrange if _, err := c.Do("zrange", "test_zrange"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zrange", "test_zrange", 0, 1, "withscore"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zrange", "test_zrange", 0, 1, "withscores", "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrevrange, almost same as zrange if _, err := c.Do("zrevrange", "test_zrevrange"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrangebyscore if _, err := c.Do("zrangebyscore", "test_zrangebyscore"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if 
_, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscore"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limi", 1, 1); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit", "a", 1); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } if _, err := c.Do("zrangebyscore", "test_zrangebyscore", 0, 1, "withscores", "limit", 1, "a"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zrevrangebyscore, almost same as zrangebyscore if _, err := c.Do("zrevrangebyscore", "test_zrevrangebyscore"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zclear if _, err := c.Do("zclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zmclear if _, err := c.Do("zmclear"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zexpire if _, err := c.Do("zexpire", "test_zexpire"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zexpireat if _, err := c.Do("zexpireat", "test_zexpireat"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zttl if _, err := c.Do("zttl"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } //zpersist if _, err := c.Do("zpersist"); err == nil { - t.Fatal("invalid err of %v", err) + t.Fatalf("invalid err of %v", err) } }