support large db index

This commit is contained in:
siddontang 2015-03-15 20:36:14 +08:00
parent 1b8d5af588
commit f51dd1712a
9 changed files with 257 additions and 97 deletions

View File

@ -18,7 +18,11 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) {
buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...) buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...)
db := new(DB) db := new(DB)
db.index = k[0] index, _, err := decodeDBIndex(k)
if err != nil {
return nil, err
}
db.setIndex(index)
//to do format at respective place //to do format at respective place

View File

@ -1,6 +1,8 @@
package ledis package ledis
import ( import (
"bytes"
"encoding/binary"
"fmt" "fmt"
"github.com/siddontang/ledisdb/store" "github.com/siddontang/ledisdb/store"
"sync" "sync"
@ -30,7 +32,10 @@ type DB struct {
bucket ibucket bucket ibucket
index uint8 index int
// buffer to store index varint
indexVarBuf []byte
kvBatch *batch kvBatch *batch
listBatch *batch listBatch *batch
@ -56,7 +61,7 @@ func (l *Ledis) newDB(index int) *DB {
d.bucket = d.sdb d.bucket = d.sdb
// d.status = DBAutoCommit // d.status = DBAutoCommit
d.index = uint8(index) d.setIndex(index)
d.kvBatch = d.newBatch() d.kvBatch = d.newBatch()
d.listBatch = d.newBatch() d.listBatch = d.newBatch()
@ -72,6 +77,37 @@ func (l *Ledis) newDB(index int) *DB {
return d return d
} }
// decodeDBIndex reads the uvarint-encoded database index from the head of
// buf. It returns the decoded index, the number of bytes the varint
// occupied, and an error when buf is truncated, the varint overflows
// 64 bits, or the index exceeds MaxDatabases.
func decodeDBIndex(buf []byte) (int, int, error) {
	index, n := binary.Uvarint(buf)
	switch {
	case n == 0:
		return 0, 0, fmt.Errorf("buf is too small to save index")
	case n < 0:
		return 0, 0, fmt.Errorf("value larger than 64 bits")
	case index > uint64(MaxDatabases):
		return 0, 0, fmt.Errorf("value %d is larger than max databases %d", index, MaxDatabases)
	}

	return int(index), n, nil
}
// setIndex records the database index on db and caches its uvarint
// encoding in db.indexVarBuf, which the key encoders prepend to every
// store key for this database.
func (db *DB) setIndex(index int) {
	db.index = index

	// binary.MaxVarintLen64 bytes is enough to hold any 64-bit uvarint
	// (replaces the hard-coded magic number 10).
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(index))
	db.indexVarBuf = buf[:n]
}
// checkKeyIndex verifies that buf begins with this database's cached
// varint index prefix (db.indexVarBuf). It returns the prefix length,
// i.e. the offset where the rest of the key starts, or an error when
// buf is shorter than the prefix or carries a different index.
func (db *DB) checkKeyIndex(buf []byte) (int, error) {
	n := len(db.indexVarBuf)
	if len(buf) < n {
		return 0, fmt.Errorf("key is too small")
	}
	if !bytes.Equal(db.indexVarBuf, buf[0:n]) {
		return 0, fmt.Errorf("invalid db index")
	}

	return n, nil
}
func (db *DB) newTTLChecker() *ttlChecker { func (db *DB) newTTLChecker() *ttlChecker {
c := new(ttlChecker) c := new(ttlChecker)
c.db = db c.db = db

View File

@ -178,11 +178,22 @@ func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) {
} }
} }
func (db *DB) decodeScanKey(storeDataType byte, ek []byte) ([]byte, error) { func (db *DB) decodeScanKey(storeDataType byte, ek []byte) (key []byte, err error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != storeDataType { switch storeDataType {
return nil, errMetaKey case KVType:
key, err = db.decodeKVKey(ek)
case LMetaType:
key, err = db.lDecodeMetaKey(ek)
case HSizeType:
key, err = db.hDecodeSizeKey(ek)
case ZSizeType:
key, err = db.zDecodeSizeKey(ek)
case SSizeType:
key, err = db.sDecodeSizeKey(ek)
default:
err = errDataType
} }
return ek[2:], nil return
} }
// for special data scan // for special data scan

View File

@ -31,29 +31,41 @@ func checkHashKFSize(key []byte, field []byte) error {
} }
func (db *DB) hEncodeSizeKey(key []byte) []byte { func (db *DB) hEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+2) buf := make([]byte, len(key)+1+len(db.indexVarBuf))
buf[0] = db.index pos := 0
buf[1] = HSizeType n := copy(buf, db.indexVarBuf)
pos += n
buf[pos] = HSizeType
pos++
copy(buf[pos:], key)
copy(buf[2:], key)
return buf return buf
} }
func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) { func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != HSizeType { pos, err := db.checkKeyIndex(ek)
return nil, errHSizeKey if err != nil {
return nil, err
} }
return ek[2:], nil if pos+1 > len(ek) || ek[pos] != HSizeType {
return nil, errHSizeKey
}
pos++
return ek[pos:], nil
} }
func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte { func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
buf := make([]byte, len(key)+len(field)+1+1+2+1) buf := make([]byte, len(key)+len(field)+1+1+2+len(db.indexVarBuf))
pos := 0 pos := 0
buf[pos] = db.index n := copy(buf, db.indexVarBuf)
pos++ pos += n
buf[pos] = HashType buf[pos] = HashType
pos++ pos++
@ -71,15 +83,24 @@ func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
} }
func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) { func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) {
if len(ek) < 5 || ek[0] != db.index || ek[1] != HashType { pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, nil, err
}
if pos+1 > len(ek) || ek[pos] != HashType {
return nil, nil, errHashKey
}
pos++
if pos+2 > len(ek) {
return nil, nil, errHashKey return nil, nil, errHashKey
} }
pos := 2
keyLen := int(binary.BigEndian.Uint16(ek[pos:])) keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
pos += 2 pos += 2
if keyLen+5 > len(ek) { if keyLen+pos > len(ek) {
return nil, nil, errHashKey return nil, nil, errHashKey
} }

View File

@ -33,19 +33,26 @@ func checkValueSize(value []byte) error {
} }
func (db *DB) encodeKVKey(key []byte) []byte { func (db *DB) encodeKVKey(key []byte) []byte {
ek := make([]byte, len(key)+2) ek := make([]byte, len(key)+1+len(db.indexVarBuf))
ek[0] = db.index pos := copy(ek, db.indexVarBuf)
ek[1] = KVType ek[pos] = KVType
copy(ek[2:], key) pos++
copy(ek[pos:], key)
return ek return ek
} }
func (db *DB) decodeKVKey(ek []byte) ([]byte, error) { func (db *DB) decodeKVKey(ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != KVType { pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, err
}
if pos+1 > len(ek) || ek[pos] != KVType {
return nil, errKVKey return nil, errKVKey
} }
return ek[2:], nil pos++
return ek[pos:], nil
} }
func (db *DB) encodeKVMinKey() []byte { func (db *DB) encodeKVMinKey() []byte {

View File

@ -24,28 +24,34 @@ var errListKey = errors.New("invalid list key")
var errListSeq = errors.New("invalid list sequence, overflow") var errListSeq = errors.New("invalid list sequence, overflow")
func (db *DB) lEncodeMetaKey(key []byte) []byte { func (db *DB) lEncodeMetaKey(key []byte) []byte {
buf := make([]byte, len(key)+2) buf := make([]byte, len(key)+1+len(db.indexVarBuf))
buf[0] = db.index pos := copy(buf, db.indexVarBuf)
buf[1] = LMetaType buf[pos] = LMetaType
pos++
copy(buf[2:], key) copy(buf[pos:], key)
return buf return buf
} }
func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) { func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != LMetaType { pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, err
}
if pos+1 > len(ek) || ek[pos] != LMetaType {
return nil, errLMetaKey return nil, errLMetaKey
} }
return ek[2:], nil pos++
return ek[pos:], nil
} }
func (db *DB) lEncodeListKey(key []byte, seq int32) []byte { func (db *DB) lEncodeListKey(key []byte, seq int32) []byte {
buf := make([]byte, len(key)+8) buf := make([]byte, len(key)+7+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
pos := 0
buf[pos] = db.index
pos++
buf[pos] = ListType buf[pos] = ListType
pos++ pos++
@ -61,19 +67,33 @@ func (db *DB) lEncodeListKey(key []byte, seq int32) []byte {
} }
func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) { func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) {
if len(ek) < 8 || ek[0] != db.index || ek[1] != ListType { pos := 0
pos, err = db.checkKeyIndex(ek)
if err != nil {
return
}
if pos+1 > len(ek) || ek[pos] != ListType {
err = errListKey err = errListKey
return return
} }
keyLen := int(binary.BigEndian.Uint16(ek[2:])) pos++
if keyLen+8 != len(ek) {
if pos+2 > len(ek) {
err = errListKey err = errListKey
return return
} }
key = ek[4 : 4+keyLen] keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
seq = int32(binary.BigEndian.Uint32(ek[4+keyLen:])) pos += 2
if keyLen+pos+4 != len(ek) {
err = errListKey
return
}
key = ek[pos : pos+keyLen]
seq = int32(binary.BigEndian.Uint32(ek[pos+keyLen:]))
return return
} }

View File

@ -29,29 +29,36 @@ func checkSetKMSize(key []byte, member []byte) error {
} }
func (db *DB) sEncodeSizeKey(key []byte) []byte { func (db *DB) sEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+2) buf := make([]byte, len(key)+1+len(db.indexVarBuf))
buf[0] = db.index pos := copy(buf, db.indexVarBuf)
buf[1] = SSizeType buf[pos] = SSizeType
copy(buf[2:], key) pos++
copy(buf[pos:], key)
return buf return buf
} }
func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) { func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != SSizeType { pos, err := db.checkKeyIndex(ek)
return nil, errSSizeKey if err != nil {
return nil, err
} }
return ek[2:], nil if pos+1 > len(ek) || ek[pos] != SSizeType {
return nil, errSSizeKey
}
pos++
return ek[pos:], nil
} }
func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte { func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte {
buf := make([]byte, len(key)+len(member)+1+1+2+1) buf := make([]byte, len(key)+len(member)+1+1+2+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
pos := 0
buf[pos] = db.index
pos++
buf[pos] = SetType buf[pos] = SetType
pos++ pos++
@ -69,15 +76,25 @@ func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte {
} }
func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) { func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) {
if len(ek) < 5 || ek[0] != db.index || ek[1] != SetType { pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, nil, err
}
if pos+1 > len(ek) || ek[pos] != SetType {
return nil, nil, errSetKey
}
pos++
if pos+2 > len(ek) {
return nil, nil, errSetKey return nil, nil, errSetKey
} }
pos := 2
keyLen := int(binary.BigEndian.Uint16(ek[pos:])) keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
pos += 2 pos += 2
if keyLen+5 > len(ek) { if keyLen+pos > len(ek) {
return nil, nil, errSetKey return nil, nil, errSetKey
} }

View File

@ -28,11 +28,12 @@ type ttlChecker struct {
var errExpType = errors.New("invalid expire type") var errExpType = errors.New("invalid expire type")
func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte { func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {
buf := make([]byte, len(key)+11) buf := make([]byte, len(key)+10+len(db.indexVarBuf))
buf[0] = db.index pos := copy(buf, db.indexVarBuf)
buf[1] = ExpTimeType
pos := 2 buf[pos] = ExpTimeType
pos++
binary.BigEndian.PutUint64(buf[pos:], uint64(when)) binary.BigEndian.PutUint64(buf[pos:], uint64(when))
pos += 8 pos += 8
@ -46,12 +47,13 @@ func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {
} }
func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte { func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte {
buf := make([]byte, len(key)+3) buf := make([]byte, len(key)+2+len(db.indexVarBuf))
buf[0] = db.index pos := copy(buf, db.indexVarBuf)
buf[1] = ExpMetaType buf[pos] = ExpMetaType
buf[2] = dataType pos++
pos := 3 buf[pos] = dataType
pos++
copy(buf[pos:], key) copy(buf[pos:], key)
@ -59,19 +61,29 @@ func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte {
} }
func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) { func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) {
if len(mk) <= 3 || mk[0] != db.index || mk[1] != ExpMetaType { pos, err := db.checkKeyIndex(mk)
if err != nil {
return 0, nil, err
}
if pos+2 > len(mk) || mk[pos] != ExpMetaType {
return 0, nil, errExpMetaKey return 0, nil, errExpMetaKey
} }
return mk[2], mk[3:], nil return mk[pos+1], mk[pos+2:], nil
} }
func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) { func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {
if len(tk) < 11 || tk[0] != db.index || tk[1] != ExpTimeType { pos, err := db.checkKeyIndex(tk)
if err != nil {
return 0, nil, 0, err
}
if pos+10 > len(tk) || tk[pos] != ExpTimeType {
return 0, nil, 0, errExpTimeKey return 0, nil, 0, errExpTimeKey
} }
return tk[10], tk[11:], int64(binary.BigEndian.Uint64(tk[2:])), nil return tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil
} }
func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) { func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) {

View File

@ -51,28 +51,31 @@ func checkZSetKMSize(key []byte, member []byte) error {
} }
func (db *DB) zEncodeSizeKey(key []byte) []byte { func (db *DB) zEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+2) buf := make([]byte, len(key)+1+len(db.indexVarBuf))
buf[0] = db.index pos := copy(buf, db.indexVarBuf)
buf[1] = ZSizeType buf[pos] = ZSizeType
pos++
copy(buf[2:], key) copy(buf[pos:], key)
return buf return buf
} }
func (db *DB) zDecodeSizeKey(ek []byte) ([]byte, error) { func (db *DB) zDecodeSizeKey(ek []byte) ([]byte, error) {
if len(ek) < 2 || ek[0] != db.index || ek[1] != ZSizeType { pos, err := db.checkKeyIndex(ek)
return nil, errZSizeKey if err != nil {
return nil, err
} }
return ek[2:], nil if pos+1 > len(ek) || ek[pos] != ZSizeType {
return nil, errZSizeKey
}
pos++
return ek[pos:], nil
} }
func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte { func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte {
buf := make([]byte, len(key)+len(member)+5) buf := make([]byte, len(key)+len(member)+4+len(db.indexVarBuf))
pos := 0 pos := copy(buf, db.indexVarBuf)
buf[pos] = db.index
pos++
buf[pos] = ZSetType buf[pos] = ZSetType
pos++ pos++
@ -92,22 +95,35 @@ func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte {
} }
func (db *DB) zDecodeSetKey(ek []byte) ([]byte, []byte, error) { func (db *DB) zDecodeSetKey(ek []byte) ([]byte, []byte, error) {
if len(ek) < 5 || ek[0] != db.index || ek[1] != ZSetType { pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, nil, err
}
if pos+1 > len(ek) || ek[pos] != ZSetType {
return nil, nil, errZSetKey return nil, nil, errZSetKey
} }
keyLen := int(binary.BigEndian.Uint16(ek[2:])) pos++
if keyLen+5 > len(ek) {
if pos+2 > len(ek) {
return nil, nil, errZSetKey return nil, nil, errZSetKey
} }
key := ek[4 : 4+keyLen] keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
if keyLen+pos > len(ek) {
if ek[4+keyLen] != zsetStartMemSep {
return nil, nil, errZSetKey return nil, nil, errZSetKey
} }
member := ek[5+keyLen:] pos += 2
key := ek[pos : pos+keyLen]
if ek[pos+keyLen] != zsetStartMemSep {
return nil, nil, errZSetKey
}
pos++
member := ek[pos+keyLen:]
return key, member, nil return key, member, nil
} }
@ -123,11 +139,9 @@ func (db *DB) zEncodeStopSetKey(key []byte) []byte {
} }
func (db *DB) zEncodeScoreKey(key []byte, member []byte, score int64) []byte { func (db *DB) zEncodeScoreKey(key []byte, member []byte, score int64) []byte {
buf := make([]byte, len(key)+len(member)+14) buf := make([]byte, len(key)+len(member)+13+len(db.indexVarBuf))
pos := 0 pos := copy(buf, db.indexVarBuf)
buf[pos] = db.index
pos++
buf[pos] = ZScoreType buf[pos] = ZScoreType
pos++ pos++
@ -166,20 +180,38 @@ func (db *DB) zEncodeStopScoreKey(key []byte, score int64) []byte {
} }
func (db *DB) zDecodeScoreKey(ek []byte) (key []byte, member []byte, score int64, err error) { func (db *DB) zDecodeScoreKey(ek []byte) (key []byte, member []byte, score int64, err error) {
if len(ek) < 14 || ek[0] != db.index || ek[1] != ZScoreType { pos := 0
pos, err = db.checkKeyIndex(ek)
if err != nil {
return
}
if pos+1 > len(ek) || ek[pos] != ZScoreType {
err = errZScoreKey
return
}
pos++
if pos+2 > len(ek) {
err = errZScoreKey
return
}
keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
pos += 2
if keyLen+pos > len(ek) {
err = errZScoreKey err = errZScoreKey
return return
} }
keyLen := int(binary.BigEndian.Uint16(ek[2:])) key = ek[pos : pos+keyLen]
if keyLen+14 > len(ek) { pos += keyLen
if pos+10 > len(ek) {
err = errZScoreKey err = errZScoreKey
return return
} }
key = ek[4 : 4+keyLen]
pos := 4 + keyLen
if (ek[pos] != zsetNScoreSep) && (ek[pos] != zsetPScoreSep) { if (ek[pos] != zsetNScoreSep) && (ek[pos] != zsetPScoreSep) {
err = errZScoreKey err = errZScoreKey
return return