From 39d6db56bd21566b7d8ceafb4e6d277163c05b4a Mon Sep 17 00:00:00 2001
From: siddontang
Date: Sat, 13 Sep 2014 23:06:36 +0800
Subject: [PATCH 01/42] change open file and mkdir mode

open file use 0644
mkdir use 0755
---
 cmd/ledis-dump/main.go   | 2 +-
 ledis/binlog.go          | 2 +-
 server/replication.go    | 4 ++--
 store/boltdb/db.go       | 2 +-
 store/goleveldb/db.go    | 2 +-
 store/hyperleveldb/db.go | 2 +-
 store/leveldb/db.go      | 2 +-
 store/rocksdb/db.go      | 2 +-
 store/store.go           | 2 +-
 9 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/cmd/ledis-dump/main.go b/cmd/ledis-dump/main.go
index bf02ad7..b6a798d 100644
--- a/cmd/ledis-dump/main.go
+++ b/cmd/ledis-dump/main.go
@@ -23,7 +23,7 @@ func main() {
 	var err error
 	var f *os.File
-	if f, err = os.OpenFile(*dumpFile, os.O_CREATE|os.O_WRONLY, os.ModePerm); err != nil {
+	if f, err = os.OpenFile(*dumpFile, os.O_CREATE|os.O_WRONLY, 0644); err != nil {
 		println(err.Error())
 		return
 	}
diff --git a/ledis/binlog.go b/ledis/binlog.go
index 077398a..f26323e 100644
--- a/ledis/binlog.go
+++ b/ledis/binlog.go
@@ -117,7 +117,7 @@ func NewBinLog(cfg *config.Config) (*BinLog, error) {
 	l.path = path.Join(cfg.DataDir, "binlog")
 
-	if err := os.MkdirAll(l.path, os.ModePerm); err != nil {
+	if err := os.MkdirAll(l.path, 0755); err != nil {
 		return nil, err
 	}
diff --git a/server/replication.go b/server/replication.go
index 267a29b..445a813 100644
--- a/server/replication.go
+++ b/server/replication.go
@@ -38,7 +38,7 @@ func (m *MasterInfo) Save(filePath string) error {
 	filePathBak := fmt.Sprintf("%s.bak", filePath)
 
 	var fd *os.File
-	fd, err = os.OpenFile(filePathBak, os.O_CREATE|os.O_WRONLY, os.ModePerm)
+	fd, err = os.OpenFile(filePathBak, os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
 		return err
 	}
@@ -250,7 +250,7 @@ func (m *master) fullSync() error {
 	}
 
 	dumpPath := path.Join(m.app.cfg.DataDir, "master.dump")
-	f, err := os.OpenFile(dumpPath, os.O_CREATE|os.O_WRONLY, os.ModePerm)
+	f, err := os.OpenFile(dumpPath, os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
 		return err
 	}
diff --git a/store/boltdb/db.go b/store/boltdb/db.go
index f2cb1f3..15a0570 100644
--- a/store/boltdb/db.go
+++ b/store/boltdb/db.go
@@ -18,7 +18,7 @@ func (s Store) String() string {
 }
 
 func (s Store) Open(dbPath string, cfg *config.Config) (driver.IDB, error) {
-	os.MkdirAll(dbPath, os.ModePerm)
+	os.MkdirAll(dbPath, 0755)
 	name := path.Join(dbPath, "ledis_bolt.db")
 	db := new(DB)
 	var err error
diff --git a/store/goleveldb/db.go b/store/goleveldb/db.go
index 2a13f50..dad6a90 100644
--- a/store/goleveldb/db.go
+++ b/store/goleveldb/db.go
@@ -47,7 +47,7 @@ type DB struct {
 }
 
 func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
-	if err := os.MkdirAll(path, os.ModePerm); err != nil {
+	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
 	}
diff --git a/store/hyperleveldb/db.go b/store/hyperleveldb/db.go
index d6d7aeb..071dd38 100644
--- a/store/hyperleveldb/db.go
+++ b/store/hyperleveldb/db.go
@@ -28,7 +28,7 @@ func (s Store) String() string {
 }
 
 func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
-	if err := os.MkdirAll(path, os.ModePerm); err != nil {
+	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
 	}
diff --git a/store/leveldb/db.go b/store/leveldb/db.go
index 92a2419..ab29928 100644
--- a/store/leveldb/db.go
+++ b/store/leveldb/db.go
@@ -28,7 +28,7 @@ func (s Store) String() string {
 }
 
 func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
-	if err := os.MkdirAll(path, os.ModePerm); err != nil {
+	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
 	}
diff --git a/store/rocksdb/db.go b/store/rocksdb/db.go
index 1c79229..f3fb406 100644
--- a/store/rocksdb/db.go
+++ b/store/rocksdb/db.go
@@ -29,7 +29,7 @@ func (s Store) String() string {
 }
 
 func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
-	if err := os.MkdirAll(path, os.ModePerm); err != nil {
+	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
 	}
diff --git a/store/store.go b/store/store.go
index d620c03..aa4b485 100644
--- a/store/store.go
+++ b/store/store.go
@@ -27,7 +27,7 @@ func Open(cfg *config.Config) (*DB, error) {
 	path := getStorePath(cfg)
 
-	if err := os.MkdirAll(path, os.ModePerm); err != nil {
+	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
 	}

From 95cbcc6460d932fee47d1ce5765925c9d81d55cf Mon Sep 17 00:00:00 2001
From: siddontang
Date: Mon, 15 Sep 2014 22:42:15 +0800
Subject: [PATCH 02/42] add dbpath config

---
 config/config.go   | 1 +
 config/config.toml | 3 +++
 etc/ledis.conf     | 3 +++
 store/store.go     | 6 +++++-
 4 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/config/config.go b/config/config.go
index ca93d29..1464edf 100644
--- a/config/config.go
+++ b/config/config.go
@@ -50,6 +50,7 @@ type Config struct {
 	DataDir string `toml:"data_dir"`
 
 	DBName string `toml:"db_name"`
+	DBPath string `toml:"db_path"`
 
 	LevelDB LevelDBConfig `toml:"leveldb"`
 
diff --git a/config/config.toml b/config/config.toml
index 2a3a246..f271a70 100644
--- a/config/config.toml
+++ b/config/config.toml
@@ -27,6 +27,9 @@ slaveof = ""
 #
 db_name = "leveldb"
 
+# if not set, use data_dir/"db_name"_data
+db_path = ""
+
 [leveldb]
 compression = false
 block_size = 32768
diff --git a/etc/ledis.conf b/etc/ledis.conf
index d3adbd8..c0606eb 100644
--- a/etc/ledis.conf
+++ b/etc/ledis.conf
@@ -29,6 +29,9 @@ slaveof = ""
 #
 db_name = "leveldb"
 
+# if not set, use data_dir/"db_name"_data
+db_path = ""
+
 [leveldb]
 compression = false
 block_size = 32768
diff --git a/store/store.go b/store/store.go
index aa4b485..2edde30 100644
--- a/store/store.go
+++ b/store/store.go
@@ -16,7 +16,11 @@ import (
 )
 
 func getStorePath(cfg *config.Config) string {
-	return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
+	if len(cfg.DBPath) > 0 {
+		return cfg.DBPath
+	} else {
+		return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
+	}
 }
 
 func Open(cfg *config.Config) (*DB, error) {

From 63e24376113bd69756205663cd9850929649e9dd Mon Sep 17 00:00:00 2001
From: siddontang
Date: Mon, 15 Sep 2014 22:42:25 +0800
Subject: [PATCH 03/42] add base wal package

---
 wal/file_store.go      |  83 ++++++++++++
 wal/gen.go             | 137 +++++++++++++++++++
 wal/gen_test.go        |  48 +++++++
 wal/goleveldb_store.go | 229 +++++++++++++++++++++++++++++++
 wal/log.go             |  73 ++++++++++
 wal/log_test.go        |  27 ++++
 wal/store_test.go      | 137 +++++++++++++++++++
 wal/wal.go             |  46 +++++++
 8 files changed, 780 insertions(+)
 create mode 100644 wal/file_store.go
 create mode 100644 wal/gen.go
 create mode 100644 wal/gen_test.go
 create mode 100644 wal/goleveldb_store.go
 create mode 100644 wal/log.go
 create mode 100644 wal/log_test.go
 create mode 100644 wal/store_test.go
 create mode 100644 wal/wal.go

diff --git a/wal/file_store.go b/wal/file_store.go
new file mode 100644
index 0000000..0d39efb
--- /dev/null
+++ b/wal/file_store.go
@@ -0,0 +1,83 @@
+package wal
+
+import (
+	"os"
+	"sync"
+)
+
+const (
+	defaultMaxLogFileSize = 1024 * 1024 * 1024
+	defaultMaxLogFileNum  = 10
+)
+
+type FileStore struct {
+	Store
+
+	m sync.Mutex
+
maxFileSize int + maxFileNum int + + first uint64 + last uint64 +} + +func NewFileStore(path string) (*FileStore, error) { + s := new(FileStore) + + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + s.maxFileSize = defaultMaxLogFileSize + s.maxFileNum = defaultMaxLogFileNum + + s.first = 0 + s.last = 0 + + return s, nil +} + +func (s *FileStore) SetMaxFileSize(size int) { + s.maxFileSize = size +} + +func (s *FileStore) SetMaxFileNum(n int) { + s.maxFileNum = n +} + +func (s *FileStore) GetLog(id uint64, log *Log) error { + return nil +} + +func (s *FileStore) SeekLog(id uint64, log *Log) error { + return nil +} + +func (s *FileStore) FirstID() (uint64, error) { + return 0, nil +} + +func (s *FileStore) LastID() (uint64, error) { + return 0, nil +} + +func (s *FileStore) StoreLog(log *Log) error { + return nil +} + +func (s *FileStore) StoreLogs(logs []*Log) error { + return nil +} + +func (s *FileStore) DeleteRange(start, stop uint64) error { + return nil +} + +func (s *FileStore) Clear() error { + return nil +} + +func (s *FileStore) Close() error { + return nil +} diff --git a/wal/gen.go b/wal/gen.go new file mode 100644 index 0000000..9d84938 --- /dev/null +++ b/wal/gen.go @@ -0,0 +1,137 @@ +package wal + +import ( + "encoding/binary" + "fmt" + "os" + "path" + "sync" +) + +type FileIDGenerator struct { + LogIDGenerator + + m sync.Mutex + f *os.File + + id uint64 +} + +func NewFileIDGenerator(base string) (*FileIDGenerator, error) { + if err := os.MkdirAll(base, 0755); err != nil { + return nil, err + } + + g := new(FileIDGenerator) + + name := path.Join(base, "log.id") + + var err error + if g.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644); err != nil { + return nil, err + } + + s, _ := g.f.Stat() + if s.Size() == 0 { + g.id = 0 + } else if s.Size() == 8 { + if err = binary.Read(g.f, binary.BigEndian, &g.id); err != nil { + g.f.Close() + return nil, err + } else if g.id == InvalidLogID { + g.f.Close() + return nil, fmt.Errorf("read invalid log id in %s", name) + } + } else { + g.f.Close() + return nil, fmt.Errorf("log id file %s is invalid", name) + } + + return g, nil +} + +func (g *FileIDGenerator) Reset(id uint64) error { + g.m.Lock() + defer g.m.Unlock() + + if g.f == nil { + return fmt.Errorf("generator closed") + } + + if g.id < id { + g.id = id + } + + return nil +} + +func (g *FileIDGenerator) GenerateID() (uint64, error) { + g.m.Lock() + defer g.m.Unlock() + + if g.f == nil { + return 0, fmt.Errorf("generator closed") + } + + if _, err := g.f.Seek(0, os.SEEK_SET); err != nil { + return 0, nil + } + + id := g.id + 1 + + if err := binary.Write(g.f, binary.BigEndian, id); err != nil { + return 0, nil + } + + g.id = id + + return id, nil +} + +func (g *FileIDGenerator) Close() error { + g.m.Lock() + defer g.m.Unlock() + + if g.f != nil { + err := g.f.Close() + g.f = nil + return err + } + return nil +} + +type MemIDGenerator struct { + m sync.Mutex + + LogIDGenerator + + id uint64 +} + +func NewMemIDGenerator(baseID uint64) *MemIDGenerator { + g := &MemIDGenerator{id: baseID} + return g +} + +func (g *MemIDGenerator) Reset(id uint64) error { + g.m.Lock() + defer g.m.Unlock() + + if g.id < id { + g.id = id + } + return nil +} + +func (g *MemIDGenerator) GenerateID() (uint64, error) { + g.m.Lock() + defer g.m.Unlock() + + g.id++ + id := g.id + return id, nil +} + +func (g *MemIDGenerator) Close() error { + return nil +} diff --git a/wal/gen_test.go b/wal/gen_test.go new file mode 100644 index 0000000..2f60999 --- /dev/null +++ b/wal/gen_test.go @@ 
-0,0 +1,48 @@ +package wal + +import ( + "io/ioutil" + "os" + "testing" +) + +func testGenerator(t *testing.T, g LogIDGenerator, base uint64) { + for i := base; i < base+100; i++ { + id, err := g.GenerateID() + if err != nil { + t.Fatal(err) + } else if id != i { + t.Fatal(id, i) + } + } +} + +func TestGenerator(t *testing.T) { + base, err := ioutil.TempDir("", "wal") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(base) + + var g *FileIDGenerator + if g, err = NewFileIDGenerator(base); err != nil { + t.Fatal(err) + } else { + testGenerator(t, g, 1) + if err = g.Close(); err != nil { + t.Fatal(err) + } + } + + if g, err = NewFileIDGenerator(base); err != nil { + t.Fatal(err) + } else { + testGenerator(t, g, 101) + if err = g.Close(); err != nil { + t.Fatal(err) + } + } + + m := NewMemIDGenerator(100) + testGenerator(t, m, 101) +} diff --git a/wal/goleveldb_store.go b/wal/goleveldb_store.go new file mode 100644 index 0000000..0510432 --- /dev/null +++ b/wal/goleveldb_store.go @@ -0,0 +1,229 @@ +package wal + +import ( + "bytes" + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store" + "os" + "sync" +) + +type GoLevelDBStore struct { + m sync.Mutex + db *store.DB + + cfg *config.Config + + first uint64 + last uint64 +} + +func (s *GoLevelDBStore) FirstID() (uint64, error) { + s.m.Lock() + defer s.m.Unlock() + return s.firstID() +} + +func (s *GoLevelDBStore) LastID() (uint64, error) { + s.m.Lock() + defer s.m.Unlock() + return s.lastID() +} + +func (s *GoLevelDBStore) firstID() (uint64, error) { + if s.first != InvalidLogID { + return s.first, nil + } + + it := s.db.NewIterator() + defer it.Close() + + it.SeekToFirst() + + if it.Valid() { + s.first = num.BytesToUint64(it.RawKey()) + } + + return s.first, nil +} + +func (s *GoLevelDBStore) lastID() (uint64, error) { + if s.last != InvalidLogID { + return s.last, nil + } + + it := s.db.NewIterator() + defer it.Close() + + it.SeekToLast() + + if it.Valid() { + s.last = num.BytesToUint64(it.RawKey()) + } + + return s.last, nil +} + +func (s *GoLevelDBStore) GetLog(id uint64, log *Log) error { + v, err := s.db.Get(num.Uint64ToBytes(id)) + if err != nil { + return err + } else if v == nil { + return ErrLogNotFound + } else { + return log.Decode(bytes.NewBuffer(v)) + } +} + +func (s *GoLevelDBStore) SeekLog(id uint64, log *Log) error { + it := s.db.NewIterator() + defer it.Close() + + it.Seek(num.Uint64ToBytes(id)) + + if !it.Valid() { + return ErrLogNotFound + } else { + return log.Decode(bytes.NewBuffer(it.RawValue())) + } +} + +func (s *GoLevelDBStore) StoreLog(log *Log) error { + return s.StoreLogs([]*Log{log}) +} + +func (s *GoLevelDBStore) StoreLogs(logs []*Log) error { + s.m.Lock() + defer s.m.Unlock() + + w := s.db.NewWriteBatch() + defer w.Rollback() + + last := s.last + + s.reset() + + var buf bytes.Buffer + for _, log := range logs { + buf.Reset() + + if log.ID <= last { + return ErrLessLogID + } + + last = log.ID + key := num.Uint64ToBytes(log.ID) + + if err := log.Encode(&buf); err != nil { + return err + } + w.Put(key, buf.Bytes()) + } + + return w.Commit() +} + +func (s *GoLevelDBStore) DeleteRange(min, max uint64) error { + s.m.Lock() + defer s.m.Unlock() + + var first, last uint64 + var err error + + first, err = s.firstID() + if err != nil { + return err + } + + last, err = s.lastID() + if err != nil { + return err + } + + min = num.MaxUint64(min, first) + max = num.MinUint64(max, last) + + w := s.db.NewWriteBatch() + defer w.Rollback() + + n := 0 + + s.reset() + 
+ for i := min; i <= max; i++ { + w.Delete(num.Uint64ToBytes(i)) + n++ + if n > 1024 { + if err = w.Commit(); err != nil { + return err + } + n = 0 + } + } + + if err = w.Commit(); err != nil { + return err + } + return nil +} + +func (s *GoLevelDBStore) Clear() error { + s.m.Lock() + defer s.m.Unlock() + + if s.db != nil { + s.db.Close() + } + + os.RemoveAll(s.cfg.DBPath) + + return s.open() +} + +func (s *GoLevelDBStore) reset() { + s.first = InvalidLogID + s.last = InvalidLogID +} + +func (s *GoLevelDBStore) Close() error { + s.m.Lock() + defer s.m.Unlock() + + if s.db == nil { + return nil + } + + err := s.db.Close() + s.db = nil + return err +} + +func (s *GoLevelDBStore) open() error { + var err error + + s.first = InvalidLogID + s.last = InvalidLogID + + s.db, err = store.Open(s.cfg) + return err +} + +func NewGoLevelDBStore(base string) (*GoLevelDBStore, error) { + cfg := new(config.Config) + cfg.DBName = "goleveldb" + cfg.DBPath = base + cfg.LevelDB.BlockSize = 4 * 1024 * 1024 + cfg.LevelDB.CacheSize = 16 * 1024 * 1024 + cfg.LevelDB.WriteBufferSize = 4 * 1024 * 1024 + cfg.LevelDB.Compression = false + + s := new(GoLevelDBStore) + s.cfg = cfg + + if err := s.open(); err != nil { + return nil, err + } + + return s, nil +} diff --git a/wal/log.go b/wal/log.go new file mode 100644 index 0000000..8ff8c95 --- /dev/null +++ b/wal/log.go @@ -0,0 +1,73 @@ +package wal + +import ( + "encoding/binary" + "io" +) + +type Log struct { + ID uint64 + CreateTime uint32 + // 0 for no compression + // 1 for snappy compression + Compression uint8 + Data []byte +} + +func (l *Log) Encode(w io.Writer) error { + length := uint32(17) + buf := make([]byte, length) + + pos := 0 + binary.BigEndian.PutUint64(buf[pos:], l.ID) + pos += 8 + + binary.BigEndian.PutUint32(buf[pos:], l.CreateTime) + pos += 4 + + buf[pos] = l.Compression + pos++ + + binary.BigEndian.PutUint32(buf[pos:], uint32(len(l.Data))) + + if n, err := w.Write(buf); err != nil { + return err + } else if n != len(buf) { + return io.ErrShortWrite + } + + if n, err := w.Write(l.Data); err != nil { + return err + } else if n != len(l.Data) { + return io.ErrShortWrite + } + return nil +} + +func (l *Log) Decode(r io.Reader) error { + length := uint32(17) + buf := make([]byte, length) + + if _, err := io.ReadFull(r, buf); err != nil { + return err + } + + pos := 0 + l.ID = binary.BigEndian.Uint64(buf[pos:]) + pos += 8 + + l.CreateTime = binary.BigEndian.Uint32(buf[pos:]) + pos += 4 + + l.Compression = buf[pos] + pos++ + + length = binary.BigEndian.Uint32(buf[pos:]) + + l.Data = make([]byte, length) + if _, err := io.ReadFull(r, l.Data); err != nil { + return err + } + + return nil +} diff --git a/wal/log_test.go b/wal/log_test.go new file mode 100644 index 0000000..ea5d91e --- /dev/null +++ b/wal/log_test.go @@ -0,0 +1,27 @@ +package wal + +import ( + "bytes" + "reflect" + "testing" +) + +func TestLog(t *testing.T) { + l1 := &Log{ID: 1, CreateTime: 100, Compression: 0, Data: []byte("hello world")} + + var buf bytes.Buffer + + if err := l1.Encode(&buf); err != nil { + t.Fatal(err) + } + + l2 := &Log{} + + if err := l2.Decode(&buf); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(l1, l2) { + t.Fatal("must equal") + } +} diff --git a/wal/store_test.go b/wal/store_test.go new file mode 100644 index 0000000..45636c6 --- /dev/null +++ b/wal/store_test.go @@ -0,0 +1,137 @@ +package wal + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestGoLevelDBStore(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "wal") + if err 
!= nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + // New level + l, err := NewGoLevelDBStore(dir) + if err != nil { + t.Fatalf("err: %v ", err) + } + defer l.Close() + + testLogs(t, l) +} + +func testLogs(t *testing.T, l Store) { + // Should be no first index + idx, err := l.FirstID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 0 { + t.Fatalf("bad idx: %d", idx) + } + + // Should be no last index + idx, err = l.LastID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 0 { + t.Fatalf("bad idx: %d", idx) + } + + // Try a filed fetch + var out Log + if err := l.GetLog(10, &out); err.Error() != "log not found" { + t.Fatalf("err: %v ", err) + } + + // Write out a log + log := Log{ + ID: 1, + Data: []byte("first"), + } + for i := 1; i <= 10; i++ { + log.ID = uint64(i) + if err := l.StoreLog(&log); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Attempt to write multiple logs + var logs []*Log + for i := 11; i <= 20; i++ { + nl := &Log{ + ID: uint64(i), + Data: []byte("first"), + } + logs = append(logs, nl) + } + if err := l.StoreLogs(logs); err != nil { + t.Fatalf("err: %v", err) + } + + // Try to fetch + if err := l.GetLog(10, &out); err != nil { + t.Fatalf("err: %v ", err) + } + + // Try to fetch + if err := l.GetLog(20, &out); err != nil { + t.Fatalf("err: %v ", err) + } + + // Check the lowest index + idx, err = l.FirstID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 1 { + t.Fatalf("bad idx: %d", idx) + } + + // Check the highest index + idx, err = l.LastID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 20 { + t.Fatalf("bad idx: %d", idx) + } + + // Delete a suffix + if err := l.DeleteRange(5, 20); err != nil { + t.Fatalf("err: %v ", err) + } + + // Verify they are all deleted + for i := 5; i <= 20; i++ { + if err := l.GetLog(uint64(i), &out); err != ErrLogNotFound { + t.Fatalf("err: %v ", err) + } + } + + // Index should be one + idx, err = l.FirstID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 1 { + t.Fatalf("bad idx: %d", idx) + } + idx, err = l.LastID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 4 { + t.Fatalf("bad idx: %d", idx) + } + + // Should not be able to fetch + if err := l.GetLog(5, &out); err != ErrLogNotFound { + t.Fatalf("err: %v ", err) + } +} diff --git a/wal/wal.go b/wal/wal.go new file mode 100644 index 0000000..b32139c --- /dev/null +++ b/wal/wal.go @@ -0,0 +1,46 @@ +package wal + +import ( + "errors" +) + +const ( + InvalidLogID uint64 = 0 +) + +var ( + ErrLogNotFound = errors.New("log not found") + ErrLessLogID = errors.New("log id is less") +) + +type LogIDGenerator interface { + // Force reset to id, if current id is larger than id, nothing reset + Reset(id uint64) error + + // ID must be first at 1, and increased monotonously, 0 is invalid + GenerateID() (uint64, error) + + Close() error +} + +type Store interface { + GetLog(id uint64, log *Log) error + + // Get the first log which ID is equal or larger than id + SeekLog(id uint64, log *Log) error + + FirstID() (uint64, error) + LastID() (uint64, error) + + // if log id is less than current last id, return error + StoreLog(log *Log) error + StoreLogs(logs []*Log) error + + // Delete logs [start, stop] + DeleteRange(start, stop uint64) error + + // Clear all logs + Clear() error + + Close() error +} From 63f8f82727d3e99740fc887b7183b174a22f11a3 Mon Sep 17 00:00:00 2001 From: siddontang Date: Tue, 16 Sep 2014 08:39:52 +0800 Subject: [PATCH 04/42] remove id gen, update --- wal/gen.go | 137 
----------------------------------------- wal/gen_test.go | 48 --------------- wal/goleveldb_store.go | 16 ++++- wal/log.go | 18 ++++++ wal/log_test.go | 13 ++++ wal/wal.go | 10 --- 6 files changed, 44 insertions(+), 198 deletions(-) delete mode 100644 wal/gen.go delete mode 100644 wal/gen_test.go diff --git a/wal/gen.go b/wal/gen.go deleted file mode 100644 index 9d84938..0000000 --- a/wal/gen.go +++ /dev/null @@ -1,137 +0,0 @@ -package wal - -import ( - "encoding/binary" - "fmt" - "os" - "path" - "sync" -) - -type FileIDGenerator struct { - LogIDGenerator - - m sync.Mutex - f *os.File - - id uint64 -} - -func NewFileIDGenerator(base string) (*FileIDGenerator, error) { - if err := os.MkdirAll(base, 0755); err != nil { - return nil, err - } - - g := new(FileIDGenerator) - - name := path.Join(base, "log.id") - - var err error - if g.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644); err != nil { - return nil, err - } - - s, _ := g.f.Stat() - if s.Size() == 0 { - g.id = 0 - } else if s.Size() == 8 { - if err = binary.Read(g.f, binary.BigEndian, &g.id); err != nil { - g.f.Close() - return nil, err - } else if g.id == InvalidLogID { - g.f.Close() - return nil, fmt.Errorf("read invalid log id in %s", name) - } - } else { - g.f.Close() - return nil, fmt.Errorf("log id file %s is invalid", name) - } - - return g, nil -} - -func (g *FileIDGenerator) Reset(id uint64) error { - g.m.Lock() - defer g.m.Unlock() - - if g.f == nil { - return fmt.Errorf("generator closed") - } - - if g.id < id { - g.id = id - } - - return nil -} - -func (g *FileIDGenerator) GenerateID() (uint64, error) { - g.m.Lock() - defer g.m.Unlock() - - if g.f == nil { - return 0, fmt.Errorf("generator closed") - } - - if _, err := g.f.Seek(0, os.SEEK_SET); err != nil { - return 0, nil - } - - id := g.id + 1 - - if err := binary.Write(g.f, binary.BigEndian, id); err != nil { - return 0, nil - } - - g.id = id - - return id, nil -} - -func (g *FileIDGenerator) Close() error { - g.m.Lock() - defer g.m.Unlock() - - if g.f != nil { - err := g.f.Close() - g.f = nil - return err - } - return nil -} - -type MemIDGenerator struct { - m sync.Mutex - - LogIDGenerator - - id uint64 -} - -func NewMemIDGenerator(baseID uint64) *MemIDGenerator { - g := &MemIDGenerator{id: baseID} - return g -} - -func (g *MemIDGenerator) Reset(id uint64) error { - g.m.Lock() - defer g.m.Unlock() - - if g.id < id { - g.id = id - } - return nil -} - -func (g *MemIDGenerator) GenerateID() (uint64, error) { - g.m.Lock() - defer g.m.Unlock() - - g.id++ - id := g.id - return id, nil -} - -func (g *MemIDGenerator) Close() error { - return nil -} diff --git a/wal/gen_test.go b/wal/gen_test.go deleted file mode 100644 index 2f60999..0000000 --- a/wal/gen_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package wal - -import ( - "io/ioutil" - "os" - "testing" -) - -func testGenerator(t *testing.T, g LogIDGenerator, base uint64) { - for i := base; i < base+100; i++ { - id, err := g.GenerateID() - if err != nil { - t.Fatal(err) - } else if id != i { - t.Fatal(id, i) - } - } -} - -func TestGenerator(t *testing.T) { - base, err := ioutil.TempDir("", "wal") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(base) - - var g *FileIDGenerator - if g, err = NewFileIDGenerator(base); err != nil { - t.Fatal(err) - } else { - testGenerator(t, g, 1) - if err = g.Close(); err != nil { - t.Fatal(err) - } - } - - if g, err = NewFileIDGenerator(base); err != nil { - t.Fatal(err) - } else { - testGenerator(t, g, 101) - if err = g.Close(); err != nil { - t.Fatal(err) - } - } - - m := 
NewMemIDGenerator(100) - testGenerator(t, m, 101) -} diff --git a/wal/goleveldb_store.go b/wal/goleveldb_store.go index 0510432..2c9b09d 100644 --- a/wal/goleveldb_store.go +++ b/wal/goleveldb_store.go @@ -100,9 +100,12 @@ func (s *GoLevelDBStore) StoreLogs(logs []*Log) error { w := s.db.NewWriteBatch() defer w.Rollback() - last := s.last + last, err := s.lastID() + if err != nil { + return err + } - s.reset() + s.last = InvalidLogID var buf bytes.Buffer for _, log := range logs { @@ -121,7 +124,12 @@ func (s *GoLevelDBStore) StoreLogs(logs []*Log) error { w.Put(key, buf.Bytes()) } - return w.Commit() + if err := w.Commit(); err != nil { + return err + } + + s.last = last + return nil } func (s *GoLevelDBStore) DeleteRange(min, max uint64) error { @@ -165,6 +173,7 @@ func (s *GoLevelDBStore) DeleteRange(min, max uint64) error { if err = w.Commit(); err != nil { return err } + return nil } @@ -176,6 +185,7 @@ func (s *GoLevelDBStore) Clear() error { s.db.Close() } + s.reset() os.RemoveAll(s.cfg.DBPath) return s.open() diff --git a/wal/log.go b/wal/log.go index 8ff8c95..d567c60 100644 --- a/wal/log.go +++ b/wal/log.go @@ -1,6 +1,7 @@ package wal import ( + "bytes" "encoding/binary" "io" ) @@ -14,6 +15,23 @@ type Log struct { Data []byte } +func (l *Log) Marshal() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 17+len(l.Data))) + buf.Reset() + + if err := l.Encode(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (l *Log) Unmarshal(b []byte) error { + buf := bytes.NewBuffer(b) + + return l.Decode(buf) +} + func (l *Log) Encode(w io.Writer) error { length := uint32(17) buf := make([]byte, length) diff --git a/wal/log_test.go b/wal/log_test.go index ea5d91e..cfd8c22 100644 --- a/wal/log_test.go +++ b/wal/log_test.go @@ -24,4 +24,17 @@ func TestLog(t *testing.T) { if !reflect.DeepEqual(l1, l2) { t.Fatal("must equal") } + + if buf, err := l1.Marshal(); err != nil { + t.Fatal(err) + } else { + if err = l2.Unmarshal(buf); err != nil { + t.Fatal(err) + } + } + + if !reflect.DeepEqual(l1, l2) { + t.Fatal("must equal") + } + } diff --git a/wal/wal.go b/wal/wal.go index b32139c..cc18870 100644 --- a/wal/wal.go +++ b/wal/wal.go @@ -13,16 +13,6 @@ var ( ErrLessLogID = errors.New("log id is less") ) -type LogIDGenerator interface { - // Force reset to id, if current id is larger than id, nothing reset - Reset(id uint64) error - - // ID must be first at 1, and increased monotonously, 0 is invalid - GenerateID() (uint64, error) - - Close() error -} - type Store interface { GetLog(id uint64, log *Log) error From 3a37b2e29713023dd80ef5dc9dee112bec09f58d Mon Sep 17 00:00:00 2001 From: siddontang Date: Tue, 16 Sep 2014 08:41:38 +0800 Subject: [PATCH 05/42] update store clear test --- wal/store_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/wal/store_test.go b/wal/store_test.go index 45636c6..5b32f3e 100644 --- a/wal/store_test.go +++ b/wal/store_test.go @@ -134,4 +134,24 @@ func testLogs(t *testing.T, l Store) { if err := l.GetLog(5, &out); err != ErrLogNotFound { t.Fatalf("err: %v ", err) } + + if err := l.Clear(); err != nil { + t.Fatal(err) + } + + idx, err = l.FirstID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 0 { + t.Fatalf("bad idx: %d", idx) + } + + idx, err = l.LastID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 0 { + t.Fatalf("bad idx: %d", idx) + } } From 8b8745be9251d0e8eb5782431a4d73e283691c68 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 17 Sep 2014 17:54:04 +0800 Subject: [PATCH 
06/42] refactor, can not build and run at all --- cmd/ledis-binlog/main.go | 85 ------ cmd/ledis-load/main.go | 15 +- config/config.go | 35 +-- config/config.toml | 10 +- etc/ledis.conf | 7 +- ledis/batch.go | 40 ++- ledis/binlog.go | 400 ----------------------------- ledis/binlog_test.go | 49 ---- ledis/const.go | 6 - ledis/dump.go | 77 +----- ledis/dump_test.go | 2 +- ledis/{binlog_util.go => event.go} | 157 +++++------ ledis/event_test.go | 34 +++ ledis/ledis.go | 18 +- ledis/ledis_test.go | 2 - ledis/t_hash.go | 2 - ledis/t_kv.go | 5 - ledis/t_zset.go | 4 +- ledis/tx.go | 6 - wal/file_store.go | 165 +++++++++++- wal/goleveldb_store.go | 57 +++- wal/log.go | 36 +-- wal/log_test.go | 1 - wal/store_test.go | 40 ++- wal/wal.go | 20 +- 25 files changed, 450 insertions(+), 823 deletions(-) delete mode 100644 cmd/ledis-binlog/main.go delete mode 100644 ledis/binlog.go delete mode 100644 ledis/binlog_test.go rename ledis/{binlog_util.go => event.go} (63%) create mode 100644 ledis/event_test.go diff --git a/cmd/ledis-binlog/main.go b/cmd/ledis-binlog/main.go deleted file mode 100644 index 3725920..0000000 --- a/cmd/ledis-binlog/main.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "bufio" - "flag" - "fmt" - "github.com/siddontang/ledisdb/ledis" - "os" - "time" -) - -var TimeFormat = "2006-01-02 15:04:05" - -var startDateTime = flag.String("start-datetime", "", - "Start reading the binary log at the first event having a timestamp equal to or later than the datetime argument.") -var stopDateTime = flag.String("stop-datetime", "", - "Stop reading the binary log at the first event having a timestamp equal to or earlier than the datetime argument.") - -var startTime uint32 = 0 -var stopTime uint32 = 0xFFFFFFFF - -func main() { - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s [options] log_file\n", os.Args[0]) - flag.PrintDefaults() - } - - flag.Parse() - - logFile := flag.Arg(0) - f, err := os.Open(logFile) - if err != nil { - println(err.Error()) - return - } - defer f.Close() - - var t time.Time - - if len(*startDateTime) > 0 { - if t, err = time.Parse(TimeFormat, *startDateTime); err != nil { - println("parse start-datetime error: ", err.Error()) - return - } - - startTime = uint32(t.Unix()) - } - - if len(*stopDateTime) > 0 { - if t, err = time.Parse(TimeFormat, *stopDateTime); err != nil { - println("parse stop-datetime error: ", err.Error()) - return - } - - stopTime = uint32(t.Unix()) - } - - rb := bufio.NewReaderSize(f, 4096) - err = ledis.ReadEventFromReader(rb, printEvent) - if err != nil { - println("read event error: ", err.Error()) - return - } -} - -func printEvent(head *ledis.BinLogHead, event []byte) error { - if head.CreateTime < startTime || head.CreateTime > stopTime { - return nil - } - - t := time.Unix(int64(head.CreateTime), 0) - - fmt.Printf("%s ", t.Format(TimeFormat)) - - s, err := ledis.FormatBinLogEvent(event) - if err != nil { - fmt.Printf("%s", err.Error()) - } else { - fmt.Printf(s) - } - - fmt.Printf("\n") - - return nil -} diff --git a/cmd/ledis-load/main.go b/cmd/ledis-load/main.go index 34165b8..1edb250 100644 --- a/cmd/ledis-load/main.go +++ b/cmd/ledis-load/main.go @@ -57,18 +57,5 @@ func loadDump(cfg *config.Config, ldb *ledis.Ledis) error { return err } - var head *ledis.BinLogAnchor - head, err = ldb.LoadDumpFile(*dumpPath) - - if err != nil { - return err - } - - //master enable binlog, here output this like mysql - if head.LogFileIndex != 0 && head.LogPos != 0 { - format := "MASTER_LOG_FILE='binlog.%07d', MASTER_LOG_POS=%d;\n" - 
fmt.Printf(format, head.LogFileIndex, head.LogPos) - } - - return nil + return ldb.LoadDumpFile(*dumpPath) } diff --git a/config/config.go b/config/config.go index 1464edf..b54da8b 100644 --- a/config/config.go +++ b/config/config.go @@ -16,14 +16,6 @@ const ( DefaultDataDir string = "./var" ) -const ( - MaxBinLogFileSize int = 1024 * 1024 * 1024 - MaxBinLogFileNum int = 10000 - - DefaultBinLogFileSize int = MaxBinLogFileSize - DefaultBinLogFileNum int = 10 -) - type LevelDBConfig struct { Compression bool `toml:"compression"` BlockSize int `toml:"block_size"` @@ -37,9 +29,8 @@ type LMDBConfig struct { NoSync bool `toml:"nosync"` } -type BinLogConfig struct { - MaxFileSize int `toml:"max_file_size"` - MaxFileNum int `toml:"max_file_num"` +type WALConfig struct { + Path string `toml:"path"` } type Config struct { @@ -52,11 +43,13 @@ type Config struct { DBName string `toml:"db_name"` DBPath string `toml:"db_path"` + UseWAL bool `toml:use_wal` + LevelDB LevelDBConfig `toml:"leveldb"` LMDB LMDBConfig `toml:"lmdb"` - BinLog BinLogConfig `toml:"binlog"` + WAL WALConfig `toml:wal` SlaveOf string `toml:"slaveof"` @@ -93,10 +86,6 @@ func NewConfigDefault() *Config { cfg.DBName = DefaultDBName - // disable binlog - cfg.BinLog.MaxFileNum = 0 - cfg.BinLog.MaxFileSize = 0 - // disable replication cfg.SlaveOf = "" @@ -126,17 +115,3 @@ func (cfg *LevelDBConfig) Adjust() { cfg.MaxOpenFiles = 1024 } } - -func (cfg *BinLogConfig) Adjust() { - if cfg.MaxFileSize <= 0 { - cfg.MaxFileSize = DefaultBinLogFileSize - } else if cfg.MaxFileSize > MaxBinLogFileSize { - cfg.MaxFileSize = MaxBinLogFileSize - } - - if cfg.MaxFileNum <= 0 { - cfg.MaxFileNum = DefaultBinLogFileNum - } else if cfg.MaxFileNum > MaxBinLogFileNum { - cfg.MaxFileNum = MaxBinLogFileNum - } -} diff --git a/config/config.toml b/config/config.toml index f271a70..ae08c47 100644 --- a/config/config.toml +++ b/config/config.toml @@ -30,6 +30,8 @@ db_name = "leveldb" # if not set, use data_dir/"db_name"_data db_path = "" +use_wal = true + [leveldb] compression = false block_size = 32768 @@ -41,8 +43,10 @@ max_open_files = 1024 map_size = 524288000 nosync = true -[binlog] -max_file_size = 0 -max_file_num = 0 +[wal] +# if not set, use data_dir/wal +path = "" + + diff --git a/etc/ledis.conf b/etc/ledis.conf index c0606eb..0d46aee 100644 --- a/etc/ledis.conf +++ b/etc/ledis.conf @@ -43,9 +43,8 @@ max_open_files = 1024 map_size = 524288000 nosync = true -[binlog] -# Set either size or num to 0 to disable binlog -max_file_size = 0 -max_file_num = 0 +[wal] +# if not set, use data_dir/wal +path = "" diff --git a/ledis/batch.go b/ledis/batch.go index b23cc47..6f97457 100644 --- a/ledis/batch.go +++ b/ledis/batch.go @@ -12,9 +12,11 @@ type batch struct { sync.Locker - logs [][]byte + eb *eventBatch tx *Tx + + noLogging bool } func (b *batch) Commit() error { @@ -23,17 +25,6 @@ func (b *batch) Commit() error { err := b.WriteBatch.Commit() - if b.l.binlog != nil { - if err == nil { - if b.tx == nil { - b.l.binlog.Log(b.logs...) - } else { - b.tx.logs = append(b.tx.logs, b.logs...) 
- } - } - b.logs = [][]byte{} - } - return err } @@ -42,29 +33,28 @@ func (b *batch) Lock() { } func (b *batch) Unlock() { - if b.l.binlog != nil { - b.logs = [][]byte{} - } + b.noLogging = false b.WriteBatch.Rollback() b.Locker.Unlock() } func (b *batch) Put(key []byte, value []byte) { - if b.l.binlog != nil { - buf := encodeBinLogPut(key, value) - b.logs = append(b.logs, buf) - } b.WriteBatch.Put(key, value) } func (b *batch) Delete(key []byte) { - if b.l.binlog != nil { - buf := encodeBinLogDelete(key) - b.logs = append(b.logs, buf) - } + b.WriteBatch.Delete(key) } +func (b *batch) LogEanbled() bool { + return !b.noLogging && b.l.log != nil +} + +func (b *batch) DisableLog(d bool) { + b.noLogging = d +} + type dbBatchLocker struct { l *sync.Mutex wrLock *sync.RWMutex @@ -100,6 +90,8 @@ func (l *Ledis) newBatch(wb store.WriteBatch, locker sync.Locker, tx *Tx) *batch b.tx = tx b.Locker = locker - b.logs = [][]byte{} + b.eb = new(eventBatch) + b.noLogging = false + return b } diff --git a/ledis/binlog.go b/ledis/binlog.go deleted file mode 100644 index f26323e..0000000 --- a/ledis/binlog.go +++ /dev/null @@ -1,400 +0,0 @@ -package ledis - -import ( - "bufio" - "encoding/binary" - "fmt" - "github.com/siddontang/go-log/log" - "github.com/siddontang/ledisdb/config" - "io" - "io/ioutil" - "os" - "path" - "strconv" - "strings" - "sync" - "time" -) - -type BinLogHead struct { - CreateTime uint32 - BatchId uint32 - PayloadLen uint32 -} - -func (h *BinLogHead) Len() int { - return 12 -} - -func (h *BinLogHead) Write(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, h.CreateTime); err != nil { - return err - } - - if err := binary.Write(w, binary.BigEndian, h.BatchId); err != nil { - return err - } - - if err := binary.Write(w, binary.BigEndian, h.PayloadLen); err != nil { - return err - } - - return nil -} - -func (h *BinLogHead) handleReadError(err error) error { - if err == io.EOF { - return io.ErrUnexpectedEOF - } else { - return err - } -} - -func (h *BinLogHead) Read(r io.Reader) error { - var err error - if err = binary.Read(r, binary.BigEndian, &h.CreateTime); err != nil { - return err - } - - if err = binary.Read(r, binary.BigEndian, &h.BatchId); err != nil { - return h.handleReadError(err) - } - - if err = binary.Read(r, binary.BigEndian, &h.PayloadLen); err != nil { - return h.handleReadError(err) - } - - return nil -} - -func (h *BinLogHead) InSameBatch(ho *BinLogHead) bool { - if h.CreateTime == ho.CreateTime && h.BatchId == ho.BatchId { - return true - } else { - return false - } -} - -/* -index file format: -ledis-bin.00001 -ledis-bin.00002 -ledis-bin.00003 - -log file format - -Log: Head|PayloadData - -Head: createTime|batchId|payloadData - -*/ - -type BinLog struct { - sync.Mutex - - path string - - cfg *config.BinLogConfig - - logFile *os.File - - logWb *bufio.Writer - - indexName string - logNames []string - lastLogIndex int64 - - batchId uint32 - - ch chan struct{} -} - -func NewBinLog(cfg *config.Config) (*BinLog, error) { - l := new(BinLog) - - l.cfg = &cfg.BinLog - l.cfg.Adjust() - - l.path = path.Join(cfg.DataDir, "binlog") - - if err := os.MkdirAll(l.path, 0755); err != nil { - return nil, err - } - - l.logNames = make([]string, 0, 16) - - l.ch = make(chan struct{}) - - if err := l.loadIndex(); err != nil { - return nil, err - } - - return l, nil -} - -func (l *BinLog) flushIndex() error { - data := strings.Join(l.logNames, "\n") - - bakName := fmt.Sprintf("%s.bak", l.indexName) - f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil 
{ - log.Error("create binlog bak index error %s", err.Error()) - return err - } - - if _, err := f.WriteString(data); err != nil { - log.Error("write binlog index error %s", err.Error()) - f.Close() - return err - } - - f.Close() - - if err := os.Rename(bakName, l.indexName); err != nil { - log.Error("rename binlog bak index error %s", err.Error()) - return err - } - - return nil -} - -func (l *BinLog) loadIndex() error { - l.indexName = path.Join(l.path, fmt.Sprintf("ledis-bin.index")) - if _, err := os.Stat(l.indexName); os.IsNotExist(err) { - //no index file, nothing to do - } else { - indexData, err := ioutil.ReadFile(l.indexName) - if err != nil { - return err - } - - lines := strings.Split(string(indexData), "\n") - for _, line := range lines { - line = strings.Trim(line, "\r\n ") - if len(line) == 0 { - continue - } - - if _, err := os.Stat(path.Join(l.path, line)); err != nil { - log.Error("load index line %s error %s", line, err.Error()) - return err - } else { - l.logNames = append(l.logNames, line) - } - } - } - if l.cfg.MaxFileNum > 0 && len(l.logNames) > l.cfg.MaxFileNum { - //remove oldest logfile - if err := l.Purge(len(l.logNames) - l.cfg.MaxFileNum); err != nil { - return err - } - } - - var err error - if len(l.logNames) == 0 { - l.lastLogIndex = 1 - } else { - lastName := l.logNames[len(l.logNames)-1] - - if l.lastLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil { - log.Error("invalid logfile name %s", err.Error()) - return err - } - - //like mysql, if server restart, a new binlog will create - l.lastLogIndex++ - } - - return nil -} - -func (l *BinLog) getLogFile() string { - return l.FormatLogFileName(l.lastLogIndex) -} - -func (l *BinLog) openNewLogFile() error { - var err error - lastName := l.getLogFile() - - logPath := path.Join(l.path, lastName) - if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil { - log.Error("open new logfile error %s", err.Error()) - return err - } - - if l.cfg.MaxFileNum > 0 && len(l.logNames) == l.cfg.MaxFileNum { - l.purge(1) - } - - l.logNames = append(l.logNames, lastName) - - if l.logWb == nil { - l.logWb = bufio.NewWriterSize(l.logFile, 1024) - } else { - l.logWb.Reset(l.logFile) - } - - if err = l.flushIndex(); err != nil { - return err - } - - return nil -} - -func (l *BinLog) checkLogFileSize() bool { - if l.logFile == nil { - return false - } - - st, _ := l.logFile.Stat() - if st.Size() >= int64(l.cfg.MaxFileSize) { - l.closeLog() - return true - } - - return false -} - -func (l *BinLog) closeLog() { - if l.logFile == nil { - return - } - - l.lastLogIndex++ - - l.logFile.Close() - l.logFile = nil -} - -func (l *BinLog) purge(n int) { - if len(l.logNames) < n { - n = len(l.logNames) - } - for i := 0; i < n; i++ { - logPath := path.Join(l.path, l.logNames[i]) - os.Remove(logPath) - } - - copy(l.logNames[0:], l.logNames[n:]) - l.logNames = l.logNames[0 : len(l.logNames)-n] -} - -func (l *BinLog) Close() { - if l.logFile != nil { - l.logFile.Close() - l.logFile = nil - } -} - -func (l *BinLog) LogNames() []string { - return l.logNames -} - -func (l *BinLog) LogFileName() string { - return l.getLogFile() -} - -func (l *BinLog) LogFilePos() int64 { - if l.logFile == nil { - return 0 - } else { - st, _ := l.logFile.Stat() - return st.Size() - } -} - -func (l *BinLog) LogFileIndex() int64 { - return l.lastLogIndex -} - -func (l *BinLog) FormatLogFileName(index int64) string { - return fmt.Sprintf("ledis-bin.%07d", index) -} - -func (l *BinLog) FormatLogFilePath(index int64) string 
{ - return path.Join(l.path, l.FormatLogFileName(index)) -} - -func (l *BinLog) LogPath() string { - return l.path -} - -func (l *BinLog) Purge(n int) error { - l.Lock() - defer l.Unlock() - - if len(l.logNames) == 0 { - return nil - } - - if n >= len(l.logNames) { - n = len(l.logNames) - //can not purge current log file - if l.logNames[n-1] == l.getLogFile() { - n = n - 1 - } - } - - l.purge(n) - - return l.flushIndex() -} - -func (l *BinLog) PurgeAll() error { - l.Lock() - defer l.Unlock() - - l.closeLog() - - l.purge(len(l.logNames)) - - return l.openNewLogFile() -} - -func (l *BinLog) Log(args ...[]byte) error { - l.Lock() - defer l.Unlock() - - var err error - - if l.logFile == nil { - if err = l.openNewLogFile(); err != nil { - return err - } - } - - head := &BinLogHead{} - - head.CreateTime = uint32(time.Now().Unix()) - head.BatchId = l.batchId - - l.batchId++ - - for _, data := range args { - head.PayloadLen = uint32(len(data)) - - if err := head.Write(l.logWb); err != nil { - return err - } - - if _, err := l.logWb.Write(data); err != nil { - return err - } - } - - if err = l.logWb.Flush(); err != nil { - log.Error("write log error %s", err.Error()) - return err - } - - l.checkLogFileSize() - - close(l.ch) - l.ch = make(chan struct{}) - - return nil -} - -func (l *BinLog) Wait() <-chan struct{} { - return l.ch -} diff --git a/ledis/binlog_test.go b/ledis/binlog_test.go deleted file mode 100644 index ea62bd9..0000000 --- a/ledis/binlog_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package ledis - -import ( - "github.com/siddontang/ledisdb/config" - "io/ioutil" - "os" - "testing" -) - -func TestBinLog(t *testing.T) { - cfg := new(config.Config) - - cfg.BinLog.MaxFileNum = 1 - cfg.BinLog.MaxFileSize = 1024 - cfg.DataDir = "/tmp/ledis_binlog" - - os.RemoveAll(cfg.DataDir) - - b, err := NewBinLog(cfg) - if err != nil { - t.Fatal(err) - } - - if err := b.Log(make([]byte, 1024)); err != nil { - t.Fatal(err) - } - - if err := b.Log(make([]byte, 1024)); err != nil { - t.Fatal(err) - } - - if fs, err := ioutil.ReadDir(b.LogPath()); err != nil { - t.Fatal(err) - } else if len(fs) != 2 { - t.Fatal(len(fs)) - } - - if err := b.PurgeAll(); err != nil { - t.Fatal(err) - } - - if fs, err := ioutil.ReadDir(b.LogPath()); err != nil { - t.Fatal(err) - } else if len(fs) != 2 { - t.Fatal(len(fs)) - } else if b.LogFilePos() != 0 { - t.Fatal(b.LogFilePos()) - } -} diff --git a/ledis/const.go b/ledis/const.go index e889f4e..9108736 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -81,12 +81,6 @@ var ( ErrScoreMiss = errors.New("zset score miss") ) -const ( - BinLogTypeDeletion uint8 = 0x0 - BinLogTypePut uint8 = 0x1 - BinLogTypeCommand uint8 = 0x2 -) - const ( DBAutoCommit uint8 = 0x0 DBInTransaction uint8 = 0x1 diff --git a/ledis/dump.go b/ledis/dump.go index f162481..6f3d81c 100644 --- a/ledis/dump.go +++ b/ledis/dump.go @@ -9,42 +9,6 @@ import ( "os" ) -//dump format -// fileIndex(bigendian int64)|filePos(bigendian int64) -// |keylen(bigendian int32)|key|valuelen(bigendian int32)|value...... 
-// -//key and value are both compressed for fast transfer dump on network using snappy - -type BinLogAnchor struct { - LogFileIndex int64 - LogPos int64 -} - -func (m *BinLogAnchor) WriteTo(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, m.LogFileIndex); err != nil { - return err - } - - if err := binary.Write(w, binary.BigEndian, m.LogPos); err != nil { - return err - } - return nil -} - -func (m *BinLogAnchor) ReadFrom(r io.Reader) error { - err := binary.Read(r, binary.BigEndian, &m.LogFileIndex) - if err != nil { - return err - } - - err = binary.Read(r, binary.BigEndian, &m.LogPos) - if err != nil { - return err - } - - return nil -} - func (l *Ledis) DumpFile(path string) error { f, err := os.Create(path) if err != nil { @@ -56,18 +20,11 @@ func (l *Ledis) DumpFile(path string) error { } func (l *Ledis) Dump(w io.Writer) error { - m := new(BinLogAnchor) - var err error l.wLock.Lock() defer l.wLock.Unlock() - if l.binlog != nil { - m.LogFileIndex = l.binlog.LogFileIndex() - m.LogPos = l.binlog.LogFilePos() - } - wb := bufio.NewWriterSize(w, 4096) if err = m.WriteTo(wb); err != nil { return err @@ -118,7 +75,7 @@ func (l *Ledis) Dump(w io.Writer) error { return nil } -func (l *Ledis) LoadDumpFile(path string) (*BinLogAnchor, error) { +func (l *Ledis) LoadDumpFile(path string) error { f, err := os.Open(path) if err != nil { return nil, err @@ -128,19 +85,12 @@ func (l *Ledis) LoadDumpFile(path string) (*BinLogAnchor, error) { return l.LoadDump(f) } -func (l *Ledis) LoadDump(r io.Reader) (*BinLogAnchor, error) { +func (l *Ledis) LoadDump(r io.Reader) error { l.wLock.Lock() defer l.wLock.Unlock() - info := new(BinLogAnchor) - rb := bufio.NewReaderSize(r, 4096) - err := info.ReadFrom(rb) - if err != nil { - return nil, err - } - var keyLen uint16 var valueLen uint32 @@ -154,33 +104,33 @@ func (l *Ledis) LoadDump(r io.Reader) (*BinLogAnchor, error) { for { if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF { - return nil, err + return err } else if err == io.EOF { break } if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil { - return nil, err + return err } if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil { - return nil, err + return err } if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil { - return nil, err + return err } if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil { - return nil, err + return err } if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil { - return nil, err + return err } if err = l.ldb.Put(key, value); err != nil { - return nil, err + return err } keyBuf.Reset() @@ -190,10 +140,11 @@ func (l *Ledis) LoadDump(r io.Reader) (*BinLogAnchor, error) { deKeyBuf = nil deValueBuf = nil - //if binlog enable, we will delete all binlogs and open a new one for handling simply - if l.binlog != nil { - l.binlog.PurgeAll() + //to do remove all wal log + + if l.log != nil { + l.log.Clear() } - return info, nil + return nil } diff --git a/ledis/dump_test.go b/ledis/dump_test.go index e29d928..8a1b2fa 100644 --- a/ledis/dump_test.go +++ b/ledis/dump_test.go @@ -38,7 +38,7 @@ func TestDump(t *testing.T) { t.Fatal(err) } - if _, err := slave.LoadDumpFile("/tmp/testdb.dump"); err != nil { + if err := slave.LoadDumpFile("/tmp/testdb.dump"); err != nil { t.Fatal(err) } diff --git a/ledis/binlog_util.go b/ledis/event.go similarity index 63% rename from ledis/binlog_util.go rename to ledis/event.go index da058bd..674e9c7 100644 --- a/ledis/binlog_util.go +++ 
b/ledis/event.go @@ -1,97 +1,108 @@ package ledis import ( + "bytes" "encoding/binary" "errors" - "fmt" + "io" "strconv" ) -var ( - errBinLogDeleteType = errors.New("invalid bin log delete type") - errBinLogPutType = errors.New("invalid bin log put type") - errBinLogCommandType = errors.New("invalid bin log command type") +const ( + kTypeDeleteEvent uint8 = 0 + kTypePutEvent uint8 = 1 ) -func encodeBinLogDelete(key []byte) []byte { - buf := make([]byte, 1+len(key)) - buf[0] = BinLogTypeDeletion - copy(buf[1:], key) - return buf +var ( + errInvalidPutEvent = errors.New("invalid put event") + errInvalidDeleteEvent = errors.New("invalid delete event") + errInvalidEvent = errors.New("invalid event") +) + +type eventBatch struct { + bytes.Buffer } -func decodeBinLogDelete(sz []byte) ([]byte, error) { - if len(sz) < 1 || sz[0] != BinLogTypeDeletion { - return nil, errBinLogDeleteType +type event struct { + key []byte + value []byte //value = nil for delete event +} + +func (b *eventBatch) Put(key []byte, value []byte) { + l := uint32(len(key) + len(value) + 1 + 2) + binary.Write(b, binary.BigEndian, l) + b.WriteByte(kTypePutEvent) + keyLen := uint16(len(key)) + binary.Write(b, binary.BigEndian, keyLen) + b.Write(key) + b.Write(value) +} + +func (b *eventBatch) Delete(key []byte) { + l := uint32(len(key) + 1) + binary.Write(b, binary.BigEndian, l) + b.WriteByte(kTypeDeleteEvent) + b.Write(key) +} + +func decodeEventBatch(data []byte) (ev []event, err error) { + ev = make([]event, 0, 16) + for { + if len(data) == 0 { + return ev, nil + } + + if len(data) < 4 { + return nil, io.ErrUnexpectedEOF + } + + l := binary.BigEndian.Uint32(data) + data = data[4:] + if uint32(len(data)) < l { + return nil, io.ErrUnexpectedEOF + } + + var e event + if err := decodeEvent(&e, data[0:l]); err != nil { + return nil, err + } + ev = append(ev, e) + data = data[l:] + } +} + +func decodeEvent(e *event, b []byte) error { + if len(b) == 0 { + return errInvalidEvent } - return sz[1:], nil -} + switch b[0] { + case kTypePutEvent: + if len(b[1:]) < 2 { + return errInvalidPutEvent + } -func encodeBinLogPut(key []byte, value []byte) []byte { - buf := make([]byte, 3+len(key)+len(value)) - buf[0] = BinLogTypePut - pos := 1 - binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) - pos += 2 - copy(buf[pos:], key) - pos += len(key) - copy(buf[pos:], value) + keyLen := binary.BigEndian.Uint16(b[1:3]) + b = b[3:] + if len(b) < int(keyLen) { + return errInvalidPutEvent + } - return buf -} - -func decodeBinLogPut(sz []byte) ([]byte, []byte, error) { - if len(sz) < 3 || sz[0] != BinLogTypePut { - return nil, nil, errBinLogPutType - } - - keyLen := int(binary.BigEndian.Uint16(sz[1:])) - if 3+keyLen > len(sz) { - return nil, nil, errBinLogPutType - } - - return sz[3 : 3+keyLen], sz[3+keyLen:], nil -} - -func FormatBinLogEvent(event []byte) (string, error) { - logType := uint8(event[0]) - - var err error - var k []byte - var v []byte - - var buf []byte = make([]byte, 0, 1024) - - switch logType { - case BinLogTypePut: - k, v, err = decodeBinLogPut(event) - buf = append(buf, "PUT "...) - case BinLogTypeDeletion: - k, err = decodeBinLogDelete(event) - buf = append(buf, "DELETE "...) + e.key = b[0:keyLen] + e.value = b[keyLen:] + case kTypeDeleteEvent: + e.value = nil + e.key = b[1:] default: - err = errInvalidBinLogEvent + return errInvalidEvent } - if err != nil { - return "", err - } - - if buf, err = formatDataKey(buf, k); err != nil { - return "", err - } - - if v != nil && len(v) != 0 { - buf = append(buf, fmt.Sprintf(" %q", v)...) 
- } - - return String(buf), nil + return nil } -func formatDataKey(buf []byte, k []byte) ([]byte, error) { +func formatEventKey(buf []byte, k []byte) ([]byte, error) { if len(k) < 2 { - return nil, errInvalidBinLogEvent + return nil, errInvalidEvent } buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...) @@ -208,7 +219,7 @@ func formatDataKey(buf []byte, k []byte) ([]byte, error) { buf = strconv.AppendQuote(buf, String(key)) } default: - return nil, errInvalidBinLogEvent + return nil, errInvalidEvent } return buf, nil diff --git a/ledis/event_test.go b/ledis/event_test.go new file mode 100644 index 0000000..0349ea7 --- /dev/null +++ b/ledis/event_test.go @@ -0,0 +1,34 @@ +package ledis + +import ( + "reflect" + "testing" +) + +func TestEvent(t *testing.T) { + k1 := []byte("k1") + v1 := []byte("v1") + k2 := []byte("k2") + k3 := []byte("k3") + v3 := []byte("v3") + + b := new(eventBatch) + + b.Put(k1, v1) + b.Delete(k2) + b.Put(k3, v3) + + buf := b.Bytes() + + ev2 := []event{ + event{k1, v1}, + event{k2, nil}, + event{k3, v3}, + } + + if ev, err := decodeEventBatch(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ev, ev2) { + t.Fatal("not equal") + } +} diff --git a/ledis/ledis.go b/ledis/ledis.go index f3c1c8c..c4ac42f 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -5,6 +5,7 @@ import ( "github.com/siddontang/go-log/log" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/store" + "github.com/siddontang/ledisdb/wal" "sync" "time" ) @@ -18,10 +19,12 @@ type Ledis struct { quit chan struct{} jobs *sync.WaitGroup - binlog *BinLog + log wal.Store wLock sync.RWMutex //allow one write at same time commitLock sync.Mutex //allow one write commit at same time + + readOnly bool } func Open(cfg *config.Config) (*Ledis, error) { @@ -41,13 +44,10 @@ func Open(cfg *config.Config) (*Ledis, error) { l.ldb = ldb - if cfg.BinLog.MaxFileNum > 0 && cfg.BinLog.MaxFileSize > 0 { - l.binlog, err = NewBinLog(cfg) - if err != nil { + if cfg.UseWAL { + if l.log, err = wal.NewStore(cfg); err != nil { return nil, err } - } else { - l.binlog = nil } for i := uint8(0); i < MaxDBNumber; i++ { @@ -65,9 +65,9 @@ func (l *Ledis) Close() { l.ldb.Close() - if l.binlog != nil { - l.binlog.Close() - l.binlog = nil + if l.log != nil { + l.log.Close() + l.log = nil } } diff --git a/ledis/ledis_test.go b/ledis/ledis_test.go index d5a5476..45f1c7f 100644 --- a/ledis/ledis_test.go +++ b/ledis/ledis_test.go @@ -14,8 +14,6 @@ func getTestDB() *DB { f := func() { cfg := new(config.Config) cfg.DataDir = "/tmp/test_ledis" - // cfg.BinLog.MaxFileSize = 1073741824 - // cfg.BinLog.MaxFileNum = 3 os.RemoveAll(cfg.DataDir) diff --git a/ledis/t_hash.go b/ledis/t_hash.go index 8ee199e..952ddae 100644 --- a/ledis/t_hash.go +++ b/ledis/t_hash.go @@ -183,8 +183,6 @@ func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) { return 0, err } - //todo add binlog - err = t.Commit() return n, err } diff --git a/ledis/t_kv.go b/ledis/t_kv.go index 1dd540a..fd13436 100644 --- a/ledis/t_kv.go +++ b/ledis/t_kv.go @@ -77,8 +77,6 @@ func (db *DB) incr(key []byte, delta int64) (int64, error) { t.Put(key, StrPutInt64(n)) - //todo binlog - err = t.Commit() return n, err } @@ -244,7 +242,6 @@ func (db *DB) MSet(args ...KVPair) error { t.Put(key, value) - //todo binlog } err = t.Commit() @@ -297,8 +294,6 @@ func (db *DB) SetNX(key []byte, value []byte) (int64, error) { } else { t.Put(key, value) - //todo binlog - err = t.Commit() } diff --git a/ledis/t_zset.go b/ledis/t_zset.go index 47af6ec..50fc6aa 100644 --- 
a/ledis/t_zset.go +++ b/ledis/t_zset.go @@ -305,7 +305,6 @@ func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { return 0, err } - //todo add binlog err := t.Commit() return num, err } @@ -862,7 +861,6 @@ func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, agg sk := db.zEncodeSizeKey(destKey) t.Put(sk, PutInt64(num)) - //todo add binlog if err := t.Commit(); err != nil { return 0, err } @@ -930,7 +928,7 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg var num int64 = int64(len(destMap)) sk := db.zEncodeSizeKey(destKey) t.Put(sk, PutInt64(num)) - //todo add binlog + if err := t.Commit(); err != nil { return 0, err } diff --git a/ledis/tx.go b/ledis/tx.go index 6339bae..2d96bd3 100644 --- a/ledis/tx.go +++ b/ledis/tx.go @@ -15,8 +15,6 @@ type Tx struct { *DB tx *store.Tx - - logs [][]byte } func (db *DB) IsTransaction() bool { @@ -71,10 +69,6 @@ func (tx *Tx) Commit() error { err := tx.tx.Commit() tx.tx = nil - if len(tx.logs) > 0 { - tx.l.binlog.Log(tx.logs...) - } - tx.l.commitLock.Unlock() tx.l.wLock.Unlock() diff --git a/wal/file_store.go b/wal/file_store.go index 0d39efb..5eb800f 100644 --- a/wal/file_store.go +++ b/wal/file_store.go @@ -1,25 +1,44 @@ package wal import ( + "fmt" + "github.com/siddontang/go-log/log" + "io/ioutil" "os" + "path" + "strconv" + "strings" "sync" ) const ( defaultMaxLogFileSize = 1024 * 1024 * 1024 - defaultMaxLogFileNum = 10 ) +/* +index file format: +ledis-bin.00001 +ledis-bin.00002 +ledis-bin.00003 +*/ + type FileStore struct { Store m sync.Mutex maxFileSize int - maxFileNum int first uint64 last uint64 + + logFile *os.File + logNames []string + nextLogIndex int64 + + indexName string + + path string } func NewFileStore(path string) (*FileStore, error) { @@ -29,12 +48,19 @@ func NewFileStore(path string) (*FileStore, error) { return nil, err } + s.path = path + s.maxFileSize = defaultMaxLogFileSize - s.maxFileNum = defaultMaxLogFileNum s.first = 0 s.last = 0 + s.logNames = make([]string, 0, 16) + + if err := s.loadIndex(); err != nil { + return nil, err + } + return s, nil } @@ -42,10 +68,6 @@ func (s *FileStore) SetMaxFileSize(size int) { s.maxFileSize = size } -func (s *FileStore) SetMaxFileNum(n int) { - s.maxFileNum = n -} - func (s *FileStore) GetLog(id uint64, log *Log) error { return nil } @@ -70,7 +92,11 @@ func (s *FileStore) StoreLogs(logs []*Log) error { return nil } -func (s *FileStore) DeleteRange(start, stop uint64) error { +func (s *FileStore) Purge(n uint64) error { + return nil +} + +func (s *FileStore) PuregeExpired(n int) error { return nil } @@ -81,3 +107,126 @@ func (s *FileStore) Clear() error { func (s *FileStore) Close() error { return nil } + +func (s *FileStore) flushIndex() error { + data := strings.Join(s.logNames, "\n") + + bakName := fmt.Sprintf("%s.bak", s.indexName) + f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + log.Error("create bak index error %s", err.Error()) + return err + } + + if _, err := f.WriteString(data); err != nil { + log.Error("write index error %s", err.Error()) + f.Close() + return err + } + + f.Close() + + if err := os.Rename(bakName, s.indexName); err != nil { + log.Error("rename bak index error %s", err.Error()) + return err + } + + return nil +} + +func (s *FileStore) fileExists(name string) bool { + p := path.Join(s.path, name) + _, err := os.Stat(p) + return !os.IsNotExist(err) +} + +func (s *FileStore) loadIndex() error { + s.indexName = path.Join(s.path, fmt.Sprintf("ledis-bin.index")) + if 
_, err := os.Stat(s.indexName); os.IsNotExist(err) { + //no index file, nothing to do + } else { + indexData, err := ioutil.ReadFile(s.indexName) + if err != nil { + return err + } + + lines := strings.Split(string(indexData), "\n") + for _, line := range lines { + line = strings.Trim(line, "\r\n ") + if len(line) == 0 { + continue + } + + if s.fileExists(line) { + s.logNames = append(s.logNames, line) + } else { + log.Info("log %s has not exists", line) + } + } + } + + var err error + if len(s.logNames) == 0 { + s.nextLogIndex = 1 + } else { + lastName := s.logNames[len(s.logNames)-1] + + if s.nextLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil { + log.Error("invalid logfile name %s", err.Error()) + return err + } + + //like mysql, if server restart, a new log will create + s.nextLogIndex++ + } + + return nil +} + +func (s *FileStore) openNewLogFile() error { + var err error + lastName := s.formatLogFileName(s.nextLogIndex) + + logPath := path.Join(s.path, lastName) + if s.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0644); err != nil { + log.Error("open new logfile error %s", err.Error()) + return err + } + + s.logNames = append(s.logNames, lastName) + + if err = s.flushIndex(); err != nil { + return err + } + + return nil +} + +func (s *FileStore) checkLogFileSize() bool { + if s.logFile == nil { + return false + } + + st, _ := s.logFile.Stat() + if st.Size() >= int64(s.maxFileSize) { + s.closeLog() + return true + } + + return false +} + +func (s *FileStore) closeLog() { + if s.logFile == nil { + return + } + + s.nextLogIndex++ + + s.logFile.Close() + s.logFile = nil +} + +func (s *FileStore) formatLogFileName(index int64) string { + return fmt.Sprintf("ledis-bin.%07d", index) +} diff --git a/wal/goleveldb_store.go b/wal/goleveldb_store.go index 2c9b09d..4e78eb8 100644 --- a/wal/goleveldb_store.go +++ b/wal/goleveldb_store.go @@ -2,11 +2,13 @@ package wal import ( "bytes" + "fmt" "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/store" "os" "sync" + "time" ) type GoLevelDBStore struct { @@ -132,7 +134,7 @@ func (s *GoLevelDBStore) StoreLogs(logs []*Log) error { return nil } -func (s *GoLevelDBStore) DeleteRange(min, max uint64) error { +func (s *GoLevelDBStore) Purge(n uint64) error { s.m.Lock() defer s.m.Unlock() @@ -149,25 +151,16 @@ func (s *GoLevelDBStore) DeleteRange(min, max uint64) error { return err } - min = num.MaxUint64(min, first) - max = num.MinUint64(max, last) + start := first + stop := num.MinUint64(last, first+n) w := s.db.NewWriteBatch() defer w.Rollback() - n := 0 - s.reset() - for i := min; i <= max; i++ { + for i := start; i < stop; i++ { w.Delete(num.Uint64ToBytes(i)) - n++ - if n > 1024 { - if err = w.Commit(); err != nil { - return err - } - n = 0 - } } if err = w.Commit(); err != nil { @@ -177,6 +170,44 @@ func (s *GoLevelDBStore) DeleteRange(min, max uint64) error { return nil } +func (s *GoLevelDBStore) PurgeExpired(n int) error { + if n <= 0 { + return fmt.Errorf("invalid expired time %d", n) + } + + t := uint32(time.Now().Unix() - int64(n)) + + s.m.Lock() + defer s.m.Unlock() + + s.reset() + + it := s.db.NewIterator() + it.SeekToFirst() + + w := s.db.NewWriteBatch() + defer w.Rollback() + + l := new(Log) + for ; it.Valid(); it.Next() { + v := it.RawValue() + + if err := l.Unmarshal(v); err != nil { + return err + } else if l.CreateTime > t { + break + } else { + w.Delete(it.RawKey()) + } + } + + if err := w.Commit(); err != nil { + return err + } + + return nil 
+} + func (s *GoLevelDBStore) Clear() error { s.m.Lock() defer s.m.Unlock() diff --git a/wal/log.go b/wal/log.go index d567c60..c150513 100644 --- a/wal/log.go +++ b/wal/log.go @@ -4,19 +4,31 @@ import ( "bytes" "encoding/binary" "io" + "time" ) type Log struct { ID uint64 CreateTime uint32 - // 0 for no compression - // 1 for snappy compression - Compression uint8 - Data []byte + + Data []byte +} + +func NewLog(id uint64, data []byte) *Log { + l := new(Log) + l.ID = id + l.CreateTime = uint32(time.Now().Unix()) + l.Data = data + + return l +} + +func (l *Log) HeadSize() int { + return 16 } func (l *Log) Marshal() ([]byte, error) { - buf := bytes.NewBuffer(make([]byte, 17+len(l.Data))) + buf := bytes.NewBuffer(make([]byte, l.HeadSize()+len(l.Data))) buf.Reset() if err := l.Encode(buf); err != nil { @@ -33,8 +45,7 @@ func (l *Log) Unmarshal(b []byte) error { } func (l *Log) Encode(w io.Writer) error { - length := uint32(17) - buf := make([]byte, length) + buf := make([]byte, l.HeadSize()) pos := 0 binary.BigEndian.PutUint64(buf[pos:], l.ID) @@ -43,9 +54,6 @@ func (l *Log) Encode(w io.Writer) error { binary.BigEndian.PutUint32(buf[pos:], l.CreateTime) pos += 4 - buf[pos] = l.Compression - pos++ - binary.BigEndian.PutUint32(buf[pos:], uint32(len(l.Data))) if n, err := w.Write(buf); err != nil { @@ -63,8 +71,7 @@ func (l *Log) Encode(w io.Writer) error { } func (l *Log) Decode(r io.Reader) error { - length := uint32(17) - buf := make([]byte, length) + buf := make([]byte, l.HeadSize()) if _, err := io.ReadFull(r, buf); err != nil { return err @@ -77,10 +84,7 @@ func (l *Log) Decode(r io.Reader) error { l.CreateTime = binary.BigEndian.Uint32(buf[pos:]) pos += 4 - l.Compression = buf[pos] - pos++ - - length = binary.BigEndian.Uint32(buf[pos:]) + length := binary.BigEndian.Uint32(buf[pos:]) l.Data = make([]byte, length) if _, err := io.ReadFull(r, l.Data); err != nil { diff --git a/wal/log_test.go b/wal/log_test.go index cfd8c22..46109cd 100644 --- a/wal/log_test.go +++ b/wal/log_test.go @@ -36,5 +36,4 @@ func TestLog(t *testing.T) { if !reflect.DeepEqual(l1, l2) { t.Fatal("must equal") } - } diff --git a/wal/store_test.go b/wal/store_test.go index 5b32f3e..030bff0 100644 --- a/wal/store_test.go +++ b/wal/store_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "os" "testing" + "time" ) func TestGoLevelDBStore(t *testing.T) { @@ -103,12 +104,12 @@ func testLogs(t *testing.T, l Store) { } // Delete a suffix - if err := l.DeleteRange(5, 20); err != nil { + if err := l.Purge(5); err != nil { t.Fatalf("err: %v ", err) } // Verify they are all deleted - for i := 5; i <= 20; i++ { + for i := 1; i <= 5; i++ { if err := l.GetLog(uint64(i), &out); err != ErrLogNotFound { t.Fatalf("err: %v ", err) } @@ -119,14 +120,14 @@ func testLogs(t *testing.T, l Store) { if err != nil { t.Fatalf("err: %v ", err) } - if idx != 1 { + if idx != 6 { t.Fatalf("bad idx: %d", idx) } idx, err = l.LastID() if err != nil { t.Fatalf("err: %v ", err) } - if idx != 4 { + if idx != 20 { t.Fatalf("bad idx: %d", idx) } @@ -154,4 +155,35 @@ func testLogs(t *testing.T, l Store) { if idx != 0 { t.Fatalf("bad idx: %d", idx) } + + now := uint32(time.Now().Unix()) + logs = []*Log{} + for i := 1; i <= 20; i++ { + nl := &Log{ + ID: uint64(i), + CreateTime: now - 20, + Data: []byte("first"), + } + logs = append(logs, nl) + } + + if err := l.PurgeExpired(1); err != nil { + t.Fatal(err) + } + + idx, err = l.FirstID() + if err != nil { + t.Fatalf("err: %v ", err) + } + if idx != 0 { + t.Fatalf("bad idx: %d", idx) + } + + idx, err = l.LastID() + if err 
!= nil { + t.Fatalf("err: %v ", err) + } + if idx != 0 { + t.Fatalf("bad idx: %d", idx) + } } diff --git a/wal/wal.go b/wal/wal.go index cc18870..b879619 100644 --- a/wal/wal.go +++ b/wal/wal.go @@ -2,6 +2,8 @@ package wal import ( "errors" + "github.com/siddontang/ledisdb/config" + "path" ) const ( @@ -26,11 +28,25 @@ type Store interface { StoreLog(log *Log) error StoreLogs(logs []*Log) error - // Delete logs [start, stop] - DeleteRange(start, stop uint64) error + // Delete first n logs + Purge(n uint64) error + + // Delete logs before n seconds + PurgeExpired(n int) error // Clear all logs Clear() error Close() error } + +func NewStore(cfg *config.Config) (Store, error) { + //now we only support goleveldb + + base := cfg.WAL.Path + if len(base) == 0 { + base = path.Join(cfg.DataDir, "wal") + } + + return NewGoLevelDBStore(base) +} From 8c70bbfdbec7c3c4bc5e8b82c2e91e3466adf05d Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 17 Sep 2014 23:06:42 +0800 Subject: [PATCH 07/42] update, can not run at all --- bootstrap.sh | 2 ++ ledis/batch.go | 34 +++++++++++++++++++++++++++++++--- ledis/const.go | 5 ++++- ledis/ledis.go | 9 +++++---- ledis/meta.go | 36 ++++++++++++++++++++++++++++++++++++ ledis/replication.go | 4 ++-- ledis/scan.go | 20 ++++++++++---------- ledis/t_ttl.go | 4 +++- ledis/tx.go | 2 ++ ledis/util.go | 12 ++++++++++++ 10 files changed, 107 insertions(+), 21 deletions(-) create mode 100644 ledis/meta.go diff --git a/bootstrap.sh b/bootstrap.sh index ee260b7..a93c219 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -15,3 +15,5 @@ go get github.com/ugorji/go/codec go get github.com/BurntSushi/toml go get github.com/siddontang/go-bson/bson + +go get github.com/siddontang/go/num diff --git a/ledis/batch.go b/ledis/batch.go index 6f97457..f754116 100644 --- a/ledis/batch.go +++ b/ledis/batch.go @@ -20,29 +20,52 @@ type batch struct { } func (b *batch) Commit() error { + if b.l.replMode { + return ErrWriteInReplMode + } + b.l.commitLock.Lock() defer b.l.commitLock.Unlock() + if b.LogEanbled() { + + } + err := b.WriteBatch.Commit() return err } +// only use in expire cycle +func (b *batch) expireCommit() error { + b.l.commitLock.Lock() + defer b.l.commitLock.Unlock() + + return b.WriteBatch.Commit() +} + func (b *batch) Lock() { b.Locker.Lock() } func (b *batch) Unlock() { b.noLogging = false + b.eb.Reset() b.WriteBatch.Rollback() b.Locker.Unlock() } func (b *batch) Put(key []byte, value []byte) { + if b.LogEanbled() { + b.eb.Put(key, value) + } b.WriteBatch.Put(key, value) } func (b *batch) Delete(key []byte) { + if b.LogEanbled() { + b.eb.Delete(key) + } b.WriteBatch.Delete(key) } @@ -51,8 +74,9 @@ func (b *batch) LogEanbled() bool { return !b.noLogging && b.l.log != nil } -func (b *batch) DisableLog(d bool) { - b.noLogging = d +// only for expire cycle +func (b *batch) disableLog() { + b.noLogging = true } type dbBatchLocker struct { @@ -90,7 +114,11 @@ func (l *Ledis) newBatch(wb store.WriteBatch, locker sync.Locker, tx *Tx) *batch b.tx = tx b.Locker = locker - b.eb = new(eventBatch) + if tx != nil { + b.eb = tx.eb + } else { + b.eb = new(eventBatch) + } b.noLogging = false return b diff --git a/ledis/const.go b/ledis/const.go index 9108736..7ed8b05 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -23,6 +23,8 @@ const ( ExpTimeType byte = 101 ExpMetaType byte = 102 + + MetaType byte = 201 ) var ( @@ -78,7 +80,8 @@ const ( ) var ( - ErrScoreMiss = errors.New("zset score miss") + ErrScoreMiss = errors.New("zset score miss") + ErrWriteInReplMode = errors.New("write in 
replication mode") ) const ( diff --git a/ledis/ledis.go b/ledis/ledis.go index c4ac42f..666829d 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -24,7 +24,7 @@ type Ledis struct { wLock sync.RWMutex //allow one write at same time commitLock sync.Mutex //allow one write commit at same time - readOnly bool + replMode bool } func Open(cfg *config.Config) (*Ledis, error) { @@ -89,9 +89,10 @@ func (l *Ledis) FlushAll() error { return nil } -// very dangerous to use -func (l *Ledis) DataDB() *store.DB { - return l.ldb +// for replication mode, any write operations will fail, +// except clear expired data in expire cycle +func (l *Ledis) SetReplictionMode(b bool) { + l.replMode = b } func (l *Ledis) activeExpireCycle() { diff --git a/ledis/meta.go b/ledis/meta.go new file mode 100644 index 0000000..1aec07e --- /dev/null +++ b/ledis/meta.go @@ -0,0 +1,36 @@ +package ledis + +import ( + "github.com/siddontang/go/num" +) + +var ( + lastCommitIDKey = []byte{} +) + +func init() { + f := func(name string) []byte { + b := make([]byte, 0, 2+len(name)) + b = append(b, []byte{255, MetaType}...) + b = append(b, name...) + return b + } + + lastCommitIDKey = f("last_commit_id") +} + +func (l *Ledis) GetLastCommitID() (uint64, error) { + return Uint64(l.ldb.Get(lastCommitIDKey)) +} + +func (l *Ledis) GetLastLogID() (uint64, error) { + if l.log == nil { + return 0, nil + } + + return l.log.LastID() +} + +func setLastCommitID(t *batch, id uint64) { + t.Put(lastCommitIDKey, num.Uint64ToBytes(id)) +} diff --git a/ledis/replication.go b/ledis/replication.go index 804573d..fa12f1b 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -5,7 +5,7 @@ import ( "bytes" "errors" "github.com/siddontang/go-log/log" - "github.com/siddontang/ledisdb/store/driver" + "github.com/siddontang/ledisdb/store" "io" "os" "time" @@ -26,7 +26,7 @@ var ( ) type replBatch struct { - wb driver.IWriteBatch + wb store.WriteBatch events [][]byte l *Ledis diff --git a/ledis/scan.go b/ledis/scan.go index 09e2b5c..f7fca13 100644 --- a/ledis/scan.go +++ b/ledis/scan.go @@ -24,17 +24,17 @@ func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match s if err = checkKeySize(key); err != nil { return nil, err } - if minKey, err = db.encodeMetaKey(dataType, key); err != nil { + if minKey, err = db.encodeScanKey(dataType, key); err != nil { return nil, err } } else { - if minKey, err = db.encodeMinKey(dataType); err != nil { + if minKey, err = db.encodeScanMinKey(dataType); err != nil { return nil, err } } - if maxKey, err = db.encodeMaxKey(dataType); err != nil { + if maxKey, err = db.encodeScanMaxKey(dataType); err != nil { return nil, err } @@ -54,7 +54,7 @@ func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match s } for i := 0; it.Valid() && i < count && bytes.Compare(it.RawKey(), maxKey) < 0; it.Next() { - if k, err := db.decodeMetaKey(dataType, it.Key()); err != nil { + if k, err := db.decodeScanKey(dataType, it.Key()); err != nil { continue } else if r != nil && !r.Match(k) { continue @@ -67,12 +67,12 @@ func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match s return v, nil } -func (db *DB) encodeMinKey(dataType byte) ([]byte, error) { - return db.encodeMetaKey(dataType, nil) +func (db *DB) encodeScanMinKey(dataType byte) ([]byte, error) { + return db.encodeScanKey(dataType, nil) } -func (db *DB) encodeMaxKey(dataType byte) ([]byte, error) { - k, err := db.encodeMetaKey(dataType, nil) +func (db *DB) encodeScanMaxKey(dataType byte) ([]byte, error) { + k, err := 
db.encodeScanKey(dataType, nil) if err != nil { return nil, err } @@ -80,7 +80,7 @@ func (db *DB) encodeMaxKey(dataType byte) ([]byte, error) { return k, nil } -func (db *DB) encodeMetaKey(dataType byte, key []byte) ([]byte, error) { +func (db *DB) encodeScanKey(dataType byte, key []byte) ([]byte, error) { switch dataType { case KVType: return db.encodeKVKey(key), nil @@ -98,7 +98,7 @@ func (db *DB) encodeMetaKey(dataType byte, key []byte) ([]byte, error) { return nil, errDataType } } -func (db *DB) decodeMetaKey(dataType byte, ek []byte) ([]byte, error) { +func (db *DB) decodeScanKey(dataType byte, ek []byte) ([]byte, error) { if len(ek) < 2 || ek[0] != db.index || ek[1] != dataType { return nil, errMetaKey } diff --git a/ledis/t_ttl.go b/ledis/t_ttl.go index 3d12606..a2e9cba 100644 --- a/ledis/t_ttl.go +++ b/ledis/t_ttl.go @@ -174,6 +174,8 @@ func (eli *elimination) active() { t.Lock() + t.disableLog() + if exp, err := Int64(dbGet(mk)); err == nil { // check expire again if exp <= now { @@ -181,7 +183,7 @@ func (eli *elimination) active() { t.Delete(tk) t.Delete(mk) - t.Commit() + t.expireCommit() } } diff --git a/ledis/tx.go b/ledis/tx.go index 2d96bd3..2966c16 100644 --- a/ledis/tx.go +++ b/ledis/tx.go @@ -15,6 +15,8 @@ type Tx struct { *DB tx *store.Tx + + eb *eventBatch } func (db *DB) IsTransaction() bool { diff --git a/ledis/util.go b/ledis/util.go index 770bca1..258c972 100644 --- a/ledis/util.go +++ b/ledis/util.go @@ -43,6 +43,18 @@ func Int64(v []byte, err error) (int64, error) { return int64(binary.LittleEndian.Uint64(v)), nil } +func Uint64(v []byte, err error) (uint64, error) { + if err != nil { + return 0, err + } else if v == nil || len(v) == 0 { + return 0, nil + } else if len(v) != 8 { + return 0, errIntNumber + } + + return binary.LittleEndian.Uint64(v), nil +} + func PutInt64(v int64) []byte { var b []byte pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) From 2972f57436b03888ea091ae3d78fdcf2c533311d Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 18 Sep 2014 22:20:18 +0800 Subject: [PATCH 08/42] remove DataDB --- ledis/ledis.go | 5 ---- server/cmd_replication_test.go | 43 ++++++++++++++++++++-------------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/ledis/ledis.go b/ledis/ledis.go index 30e6ac7..2d52c11 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -90,11 +90,6 @@ func (l *Ledis) FlushAll() error { return nil } -// very dangerous to use -func (l *Ledis) DataDB() *store.DB { - return l.ldb -} - func (l *Ledis) activeExpireCycle() { var executors []*elimination = make([]*elimination, len(l.dbs)) for i, db := range l.dbs { diff --git a/server/cmd_replication_test.go b/server/cmd_replication_test.go index 3e7e285..5d20ff5 100644 --- a/server/cmd_replication_test.go +++ b/server/cmd_replication_test.go @@ -1,25 +1,32 @@ package server import ( - "bytes" "fmt" "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store" "os" + "reflect" "testing" "time" ) func checkDataEqual(master *App, slave *App) error { - it := master.ldb.DataDB().RangeLimitIterator(nil, nil, store.RangeClose, 0, -1) - for ; it.Valid(); it.Next() { - key := it.Key() - value := it.Value() + mdb, _ := master.ldb.Select(0) + sdb, _ := slave.ldb.Select(0) - if v, err := slave.ldb.DataDB().Get(key); err != nil { - return err - } else if !bytes.Equal(v, value) { - return fmt.Errorf("replication error %d != %d", len(v), len(value)) + mkeys, _ := mdb.Scan(nil, 100, true, "") + skeys, _ := sdb.Scan(nil, 100, true, "") + + if len(mkeys) != len(skeys) { + return 
fmt.Errorf("keys number not equal") + } else if !reflect.DeepEqual(mkeys, skeys) { + return fmt.Errorf("keys not equal") + } else { + for _, k := range mkeys { + v1, _ := mdb.Get(k) + v2, _ := sdb.Get(k) + if !reflect.DeepEqual(v1, v2) { + return fmt.Errorf("value not equal") + } } } @@ -64,8 +71,8 @@ func TestReplication(t *testing.T) { db.Set([]byte("a"), value) db.Set([]byte("b"), value) - db.HSet([]byte("a"), []byte("1"), value) - db.HSet([]byte("b"), []byte("2"), value) + db.Set([]byte("c"), value) + db.Set([]byte("d"), value) go slave.Run() @@ -77,8 +84,8 @@ func TestReplication(t *testing.T) { db.Set([]byte("a1"), value) db.Set([]byte("b1"), value) - db.HSet([]byte("a1"), []byte("1"), value) - db.HSet([]byte("b1"), []byte("2"), value) + db.Set([]byte("c1"), value) + db.Set([]byte("d1"), value) time.Sleep(1 * time.Second) if err = checkDataEqual(master, slave); err != nil { @@ -89,13 +96,13 @@ func TestReplication(t *testing.T) { db.Set([]byte("a2"), value) db.Set([]byte("b2"), value) - db.HSet([]byte("a2"), []byte("1"), value) - db.HSet([]byte("b2"), []byte("2"), value) + db.Set([]byte("c2"), value) + db.Set([]byte("d2"), value) db.Set([]byte("a3"), value) db.Set([]byte("b3"), value) - db.HSet([]byte("a3"), []byte("1"), value) - db.HSet([]byte("b3"), []byte("2"), value) + db.Set([]byte("c3"), value) + db.Set([]byte("d3"), value) if err = checkDataEqual(master, slave); err == nil { t.Fatal("must error") From e3bdb57be219ecb297500ca1f56b600183c6eaf8 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 18 Sep 2014 22:30:33 +0800 Subject: [PATCH 09/42] add DBPath and UseBinLog config --- config/config.go | 5 ++++- config/config.toml | 6 ++++++ config/config_test.go | 1 + ledis/ledis.go | 2 +- ledis/replication_test.go | 1 + server/cmd_replication_test.go | 3 +-- store/store.go | 6 +++++- 7 files changed, 19 insertions(+), 5 deletions(-) diff --git a/config/config.go b/config/config.go index ca93d29..c5736ee 100644 --- a/config/config.go +++ b/config/config.go @@ -51,11 +51,14 @@ type Config struct { DBName string `toml:"db_name"` + DBPath string `toml:"db_path"` + LevelDB LevelDBConfig `toml:"leveldb"` LMDB LMDBConfig `toml:"lmdb"` - BinLog BinLogConfig `toml:"binlog"` + UseBinLog bool `toml:"use_binlog"` + BinLog BinLogConfig `toml:"binlog"` SlaveOf string `toml:"slaveof"` diff --git a/config/config.toml b/config/config.toml index 2a3a246..29e5c5d 100644 --- a/config/config.toml +++ b/config/config.toml @@ -27,6 +27,12 @@ slaveof = "" # db_name = "leveldb" +# If not set, use data_dir/"db_name"_data +db_path = "" + +# enable binlog or not +use_binlog = true + [leveldb] compression = false block_size = 32768 diff --git a/config/config_test.go b/config/config_test.go index 218ba0f..70b4c9c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -11,6 +11,7 @@ func TestConfig(t *testing.T) { dstCfg.HttpAddr = "127.0.0.1:11181" dstCfg.DataDir = "/tmp/ledis_server" dstCfg.DBName = "leveldb" + dstCfg.UseBinLog = true dstCfg.LevelDB.Compression = false dstCfg.LevelDB.BlockSize = 32768 diff --git a/ledis/ledis.go b/ledis/ledis.go index 2d52c11..bb46152 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -41,7 +41,7 @@ func Open(cfg *config.Config) (*Ledis, error) { l.ldb = ldb - if cfg.BinLog.MaxFileNum > 0 && cfg.BinLog.MaxFileSize > 0 { + if cfg.UseBinLog { println("binlog will be refactored later, use your own risk!!!") l.binlog, err = NewBinLog(cfg) if err != nil { diff --git a/ledis/replication_test.go b/ledis/replication_test.go index 07643c6..8515229 100644 --- 
a/ledis/replication_test.go +++ b/ledis/replication_test.go @@ -34,6 +34,7 @@ func TestReplication(t *testing.T) { cfgM := new(config.Config) cfgM.DataDir = "/tmp/test_repl/master" + cfgM.UseBinLog = true cfgM.BinLog.MaxFileNum = 10 cfgM.BinLog.MaxFileSize = 50 diff --git a/server/cmd_replication_test.go b/server/cmd_replication_test.go index 5d20ff5..5b3030c 100644 --- a/server/cmd_replication_test.go +++ b/server/cmd_replication_test.go @@ -40,8 +40,7 @@ func TestReplication(t *testing.T) { masterCfg := new(config.Config) masterCfg.DataDir = fmt.Sprintf("%s/master", data_dir) masterCfg.Addr = "127.0.0.1:11182" - masterCfg.BinLog.MaxFileSize = 1 * 1024 * 1024 - masterCfg.BinLog.MaxFileNum = 10 + masterCfg.UseBinLog = true var master *App var slave *App diff --git a/store/store.go b/store/store.go index aa4b485..2edde30 100644 --- a/store/store.go +++ b/store/store.go @@ -16,7 +16,11 @@ import ( ) func getStorePath(cfg *config.Config) string { - return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName)) + if len(cfg.DBPath) > 0 { + return cfg.DBPath + } else { + return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName)) + } } func Open(cfg *config.Config) (*DB, error) { From 59e974c258b79d4d2eedb1665be652592161e2c7 Mon Sep 17 00:00:00 2001 From: siddontang Date: Mon, 22 Sep 2014 22:03:44 +0800 Subject: [PATCH 10/42] update, can not run at all --- ledis/ledis.go | 2 +- ledis/replication.go | 31 +++++++++++++++++++------------ ledis/replication_test.go | 5 +---- rpl/log.go | 8 +++++++- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/ledis/ledis.go b/ledis/ledis.go index e33ad74..50e9f16 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -58,7 +58,7 @@ func Open2(cfg *config.Config, flags int) (*Ledis, error) { return nil, err } - l.rc = make(chan struct{}) + l.rc = make(chan struct{}, 1) l.rbatch = l.ldb.NewWriteBatch() go l.onReplication() diff --git a/ledis/replication.go b/ledis/replication.go index 3473e6b..8763574 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -19,8 +19,11 @@ var ( ) func (l *Ledis) handleReplication() { + l.commitLock.Lock() + defer l.commitLock.Unlock() + l.rwg.Add(1) - var rl *rpl.Log + rl := &rpl.Log{} for { if err := l.r.NextCommitLog(rl); err != nil { if err != rpl.ErrNoBehindLog { @@ -59,33 +62,37 @@ func (l *Ledis) onReplication() { } func (l *Ledis) WaitReplication() error { - l.rwg.Wait() + b, err := l.r.CommitIDBehind() + if err != nil { + return err + } else if b { + l.rc <- struct{}{} + l.rwg.Wait() + } return nil } -func (l *Ledis) StoreLogsFromReader(rb io.Reader) (uint64, error) { +func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { if l.r == nil { - return 0, fmt.Errorf("replication not enable") + return fmt.Errorf("replication not enable") } - var log *rpl.Log - var n uint64 + log := &rpl.Log{} for { if err := log.Decode(rb); err != nil { if err == io.EOF { break } else { - return 0, err + return err } } if err := l.r.StoreLog(log); err != nil { - return 0, err + return err } - n = log.ID } select { @@ -94,10 +101,10 @@ func (l *Ledis) StoreLogsFromReader(rb io.Reader) (uint64, error) { break } - return n, nil + return nil } -func (l *Ledis) StoreLogsFromData(data []byte) (uint64, error) { +func (l *Ledis) StoreLogsFromData(data []byte) error { rb := bytes.NewReader(data) return l.StoreLogsFromReader(rb) @@ -127,7 +134,7 @@ func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uin return } - var log *rpl.Log + log := &rpl.Log{} for i := startLogID; i <= lastID; i++ { if err = 
l.r.GetLog(i, log); err != nil { return diff --git a/ledis/replication_test.go b/ledis/replication_test.go index cc3a392..4cef10d 100644 --- a/ledis/replication_test.go +++ b/ledis/replication_test.go @@ -81,17 +81,14 @@ func TestReplication(t *testing.T) { var buf bytes.Buffer var n int var id uint64 = 1 - var nid uint64 for { buf.Reset() n, id, err = master.ReadLogsTo(id, &buf) if err != nil { t.Fatal(err) } else if n != 0 { - if nid, err = slave.StoreLogsFromReader(&buf); err != nil { + if err = slave.StoreLogsFromReader(&buf); err != nil { t.Fatal(err) - } else if nid != id { - t.Fatal(nid, id) } } else if n == 0 { break diff --git a/rpl/log.go b/rpl/log.go index ad637ca..775ea5d 100644 --- a/rpl/log.go +++ b/rpl/log.go @@ -90,7 +90,13 @@ func (l *Log) Decode(r io.Reader) error { length := binary.BigEndian.Uint32(buf[pos:]) - l.Data = make([]byte, length) + l.Data = l.Data[0:0] + + if cap(l.Data) >= int(length) { + l.Data = l.Data[0:length] + } else { + l.Data = make([]byte, length) + } if _, err := io.ReadFull(r, l.Data); err != nil { return err } From 1a1250d9495393c2377b476b89e963dc2cf57e39 Mon Sep 17 00:00:00 2001 From: siddontang Date: Tue, 23 Sep 2014 17:28:09 +0800 Subject: [PATCH 11/42] refactor, remove bin log, add consensus replication --- cmd/ledis-load/main.go | 1 - config/config.go | 8 +- config/config.toml | 12 +- config/config_test.go | 23 +-- doc/commands.md | 2 +- ledis/batch.go | 2 + ledis/const.go | 6 +- ledis/dump.go | 24 +--- ledis/ledis.go | 49 ++++++- ledis/replication.go | 78 +++++++--- ledis/replication_test.go | 6 +- ledis/t_kv.go | 1 - ledis/util.go | 17 +++ rpl/rpl.go | 77 +++++++--- rpl/rpl_test.go | 1 - server/app.go | 9 ++ server/client.go | 12 +- server/client_resp.go | 5 +- server/cmd_replication.go | 35 ++--- server/cmd_replication_test.go | 18 ++- server/doc.go | 3 +- server/replication.go | 254 +++++++++++++++------------------ 22 files changed, 364 insertions(+), 279 deletions(-) diff --git a/cmd/ledis-load/main.go b/cmd/ledis-load/main.go index 57e85fb..b9b6a9c 100644 --- a/cmd/ledis-load/main.go +++ b/cmd/ledis-load/main.go @@ -2,7 +2,6 @@ package main import ( "flag" - "fmt" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/ledis" ) diff --git a/config/config.go b/config/config.go index eb0f0fc..e2381a7 100644 --- a/config/config.go +++ b/config/config.go @@ -30,9 +30,10 @@ type LMDBConfig struct { } type ReplicationConfig struct { - Use bool `toml:"use"` Path string `toml:"path"` ExpiredLogDays int `toml:"expired_log_days"` + Sync bool `toml:"sync"` + WaitSyncTime int `toml:"wait_sync_time"` } type Config struct { @@ -54,7 +55,8 @@ type Config struct { AccessLog string `toml:"access_log"` - Replication ReplicationConfig `toml:"replication"` + UseReplication bool `toml:"use_replication"` + Replication ReplicationConfig `toml:"replication"` } func NewConfigWithFile(fileName string) (*Config, error) { @@ -95,6 +97,8 @@ func NewConfigDefault() *Config { cfg.LMDB.MapSize = 20 * 1024 * 1024 cfg.LMDB.NoSync = true + cfg.Replication.WaitSyncTime = 1 + return cfg } diff --git a/config/config.toml b/config/config.toml index 848f52a..ac0aab9 100644 --- a/config/config.toml +++ b/config/config.toml @@ -30,6 +30,9 @@ db_name = "leveldb" # If not set, use data_dir/"db_name"_data db_path = "" +# enable replication or not +use_replication = true + [leveldb] compression = false block_size = 32768 @@ -42,9 +45,6 @@ map_size = 524288000 nosync = true [replication] -# enable replication or not -use = true - # Path to store replication 
information(write ahead log, commit log, etc.) # if not set, use data_dir/rpl path = "" @@ -52,4 +52,10 @@ path = "" # Expire write ahead logs after the given days expired_log_days = 7 +# If sync is true, the new log must be sent to some slaves, and then commit. +# It may affect performance. +sync = true + +# If sync is true, wait at last wait_sync_time seconds to check whether slave sync this log +wait_sync_time = 1 diff --git a/config/config_test.go b/config/config_test.go index ff98a7b..c2b5a16 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,34 +1,13 @@ package config import ( - "reflect" "testing" ) func TestConfig(t *testing.T) { - dstCfg := new(Config) - dstCfg.Addr = "127.0.0.1:6380" - dstCfg.HttpAddr = "127.0.0.1:11181" - dstCfg.DataDir = "/tmp/ledis_server" - dstCfg.DBName = "leveldb" - - dstCfg.LevelDB.Compression = false - dstCfg.LevelDB.BlockSize = 32768 - dstCfg.LevelDB.WriteBufferSize = 67108864 - dstCfg.LevelDB.CacheSize = 524288000 - dstCfg.LevelDB.MaxOpenFiles = 1024 - dstCfg.LMDB.MapSize = 524288000 - dstCfg.LMDB.NoSync = true - - dstCfg.Replication.Use = true - dstCfg.Replication.ExpiredLogDays = 7 - - cfg, err := NewConfigWithFile("./config.toml") + _, err := NewConfigWithFile("./config.toml") if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(dstCfg, cfg) { - t.Fatal("parse toml error") - } } diff --git a/doc/commands.md b/doc/commands.md index 99c8d7e..0317384 100644 --- a/doc/commands.md +++ b/doc/commands.md @@ -2474,7 +2474,7 @@ ERR invalid db index 16 ### FLUSHALL -Delete all the keys of all the existing databases, not just the currently selected one. This command never fails. +Delete all the keys of all the existing databases and replication logs, not just the currently selected one. This command never fails. Very dangerous to use!!! 
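For reference, here is a minimal sketch (not part of the patch series) of how the replication options introduced above could be wired up from Go. It only uses config fields that appear in this patch (UseReplication, Replication.Sync, Replication.WaitSyncTime, Replication.ExpiredLogDays); the listen address and data directory are illustrative.

package main

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/server"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.Addr = "127.0.0.1:6380"       // illustrative listen address
	cfg.DataDir = "/tmp/ledis_master" // illustrative data dir

	// enable the write ahead log based replication added in this patch
	cfg.UseReplication = true

	// semi-synchronous mode: a new log must reach a slave before commit,
	// waiting at most WaitSyncTime seconds for the acknowledgement
	cfg.Replication.Sync = true
	cfg.Replication.WaitSyncTime = 1

	// expire write ahead logs after 7 days; they live under data_dir/rpl by default
	cfg.Replication.ExpiredLogDays = 7

	app, err := server.NewApp(cfg)
	if err != nil {
		panic(err)
	}
	app.Run()
}
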
diff --git a/ledis/batch.go b/ledis/batch.go index 9086a8b..0d7f2d6 100644 --- a/ledis/batch.go +++ b/ledis/batch.go @@ -33,6 +33,8 @@ func (b *batch) Commit() error { return err } + b.l.propagate(l) + if err = b.WriteBatch.Commit(); err != nil { log.Fatal("commit error %s", err.Error()) return err diff --git a/ledis/const.go b/ledis/const.go index 9ad7033..7144629 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -85,8 +85,10 @@ const ( ) var ( - ErrScoreMiss = errors.New("zset score miss") - ErrWriteInROnly = errors.New("write in readonly mode") + ErrScoreMiss = errors.New("zset score miss") + ErrWriteInROnly = errors.New("write not support in readonly mode") + ErrRplInRDWR = errors.New("replication not support in read write mode") + ErrRplNotSupport = errors.New("replication not support") ) const ( diff --git a/ledis/dump.go b/ledis/dump.go index 9b5e439..bf40571 100644 --- a/ledis/dump.go +++ b/ledis/dump.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "encoding/binary" - "github.com/siddontang/go-log/log" "github.com/siddontang/go-snappy/snappy" "github.com/siddontang/ledisdb/store" "io" @@ -126,34 +125,13 @@ func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) { return l.LoadDump(f) } -func (l *Ledis) clearAllWhenLoad() error { - it := l.ldb.NewIterator() - defer it.Close() - - w := l.ldb.NewWriteBatch() - defer w.Rollback() - - n := 0 - for ; it.Valid(); it.Next() { - n++ - if n == 10000 { - w.Commit() - n = 0 - } - w.Delete(it.RawKey()) - } - - return w.Commit() -} - // clear all data and load dump file to db func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) { l.wLock.Lock() defer l.wLock.Unlock() var err error - if err = l.clearAllWhenLoad(); err != nil { - log.Fatal("clear all error when loaddump, err :%s", err.Error()) + if err = l.flushAll(); err != nil { return nil, err } diff --git a/ledis/ledis.go b/ledis/ledis.go index 50e9f16..2660701 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -19,10 +19,12 @@ type Ledis struct { quit chan struct{} wg sync.WaitGroup + //for replication r *rpl.Replication rc chan struct{} rbatch store.WriteBatch rwg sync.WaitGroup + rhs []NewLogEventHandler wLock sync.RWMutex //allow one write at same time commitLock sync.Mutex //allow one write commit at same time @@ -53,15 +55,19 @@ func Open2(cfg *config.Config, flags int) (*Ledis, error) { l.ldb = ldb - if cfg.Replication.Use { + if cfg.UseReplication { if l.r, err = rpl.NewReplication(cfg); err != nil { return nil, err } - l.rc = make(chan struct{}, 1) + l.rc = make(chan struct{}, 8) l.rbatch = l.ldb.NewWriteBatch() go l.onReplication() + + //first we must try wait all replication ok + //maybe some logs are not committed + l.WaitReplication() } else { l.r = nil } @@ -95,10 +101,43 @@ func (l *Ledis) Select(index int) (*DB, error) { return l.dbs[index], nil } +// Flush All will clear all data and replication logs func (l *Ledis) FlushAll() error { - for index, db := range l.dbs { - if _, err := db.FlushAll(); err != nil { - log.Error("flush db %d error %s", index, err.Error()) + l.wLock.Lock() + defer l.wLock.Unlock() + + return l.flushAll() +} + +func (l *Ledis) flushAll() error { + it := l.ldb.NewIterator() + defer it.Close() + + w := l.ldb.NewWriteBatch() + defer w.Rollback() + + n := 0 + for ; it.Valid(); it.Next() { + n++ + if n == 10000 { + if err := w.Commit(); err != nil { + log.Fatal("flush all commit error: %s", err.Error()) + return err + } + n = 0 + } + w.Delete(it.RawKey()) + } + + if err := w.Commit(); err != nil { + log.Fatal("flush all commit error: %s", 
err.Error()) + return err + } + + if l.r != nil { + if err := l.r.Clear(); err != nil { + log.Fatal("flush all replication clear error: %s", err.Error()) + return err } } diff --git a/ledis/replication.go b/ledis/replication.go index 8763574..4937952 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -3,7 +3,6 @@ package ledis import ( "bytes" "errors" - "fmt" "github.com/siddontang/go-log/log" "github.com/siddontang/ledisdb/rpl" "io" @@ -18,6 +17,10 @@ var ( ErrLogMissed = errors.New("log is pured in server") ) +func (l *Ledis) ReplicationUsed() bool { + return l.r != nil +} + func (l *Ledis) handleReplication() { l.commitLock.Lock() defer l.commitLock.Unlock() @@ -25,7 +28,7 @@ func (l *Ledis) handleReplication() { l.rwg.Add(1) rl := &rpl.Log{} for { - if err := l.r.NextCommitLog(rl); err != nil { + if err := l.r.NextNeedCommitLog(rl); err != nil { if err != rpl.ErrNoBehindLog { log.Error("get next commit log err, %s", err.Error) } else { @@ -47,9 +50,7 @@ func (l *Ledis) handleReplication() { } func (l *Ledis) onReplication() { - if l.r == nil { - return - } + AsyncNotify(l.rc) for { select { @@ -62,11 +63,19 @@ func (l *Ledis) onReplication() { } func (l *Ledis) WaitReplication() error { + if !l.ReplicationUsed() { + return ErrRplNotSupport + + } + AsyncNotify(l.rc) + + l.rwg.Wait() + b, err := l.r.CommitIDBehind() if err != nil { return err } else if b { - l.rc <- struct{}{} + AsyncNotify(l.rc) l.rwg.Wait() } @@ -74,8 +83,10 @@ func (l *Ledis) WaitReplication() error { } func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { - if l.r == nil { - return fmt.Errorf("replication not enable") + if !l.ReplicationUsed() { + return ErrRplNotSupport + } else if !l.readOnly { + return ErrRplInRDWR } log := &rpl.Log{} @@ -95,11 +106,7 @@ func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { } - select { - case l.rc <- struct{}{}: - default: - break - } + AsyncNotify(l.rc) return nil } @@ -111,9 +118,10 @@ func (l *Ledis) StoreLogsFromData(data []byte) error { } func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) { - if l.r == nil { + if !l.ReplicationUsed() { // no replication log nextLogID = 0 + err = ErrRplNotSupport return } @@ -134,6 +142,8 @@ func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uin return } + nextLogID = startLogID + log := &rpl.Log{} for i := startLogID; i <= lastID; i++ { if err = l.r.GetLog(i, log); err != nil { @@ -161,14 +171,48 @@ func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w io.Writer, timeout int) ( n, nextLogID, err = l.ReadLogsTo(startLogID, w) if err != nil { return - } else if n == 0 || nextLogID == 0 { + } else if n != 0 { return } //no events read select { - //case <-l.binlog.Wait(): + case <-l.r.WaitLog(): case <-time.After(time.Duration(timeout) * time.Second): } return l.ReadLogsTo(startLogID, w) - +} + +func (l *Ledis) NextSyncLogID() (uint64, error) { + if !l.ReplicationUsed() { + return 0, ErrRplNotSupport + } + + s, err := l.r.Stat() + if err != nil { + return 0, err + } + + if s.LastID > s.CommitID { + return s.LastID + 1, nil + } else { + return s.CommitID + 1, nil + } +} + +func (l *Ledis) propagate(rl *rpl.Log) { + for _, h := range l.rhs { + h(rl) + } +} + +type NewLogEventHandler func(rl *rpl.Log) + +func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error { + if !l.ReplicationUsed() { + return ErrRplNotSupport + } + + l.rhs = append(l.rhs, h) + + return nil } diff --git a/ledis/replication_test.go b/ledis/replication_test.go index 
4cef10d..6e277a9 100644 --- a/ledis/replication_test.go +++ b/ledis/replication_test.go @@ -33,7 +33,7 @@ func TestReplication(t *testing.T) { cfgM := new(config.Config) cfgM.DataDir = "/tmp/test_repl/master" - cfgM.Replication.Use = true + cfgM.UseReplication = true os.RemoveAll(cfgM.DataDir) @@ -44,11 +44,11 @@ func TestReplication(t *testing.T) { cfgS := new(config.Config) cfgS.DataDir = "/tmp/test_repl/slave" - cfgS.Replication.Use = true + cfgS.UseReplication = true os.RemoveAll(cfgS.DataDir) - slave, err = Open(cfgS) + slave, err = Open2(cfgS, ROnlyMode) if err != nil { t.Fatal(err) } diff --git a/ledis/t_kv.go b/ledis/t_kv.go index fd13436..ce0874a 100644 --- a/ledis/t_kv.go +++ b/ledis/t_kv.go @@ -183,7 +183,6 @@ func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) { } t.Put(key, value) - //todo, binlog err = t.Commit() diff --git a/ledis/util.go b/ledis/util.go index 258c972..b245f64 100644 --- a/ledis/util.go +++ b/ledis/util.go @@ -74,6 +74,16 @@ func StrInt64(v []byte, err error) (int64, error) { } } +func StrUint64(v []byte, err error) (uint64, error) { + if err != nil { + return 0, err + } else if v == nil { + return 0, nil + } else { + return strconv.ParseUint(String(v), 10, 64) + } +} + func StrInt32(v []byte, err error) (int32, error) { if err != nil { return 0, err @@ -123,3 +133,10 @@ func MaxInt32(a int32, b int32) int32 { return b } } + +func AsyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} diff --git a/rpl/rpl.go b/rpl/rpl.go index 827e825..8227371 100644 --- a/rpl/rpl.go +++ b/rpl/rpl.go @@ -2,7 +2,6 @@ package rpl import ( "encoding/binary" - "fmt" "github.com/siddontang/go-log/log" "github.com/siddontang/ledisdb/config" "os" @@ -11,6 +10,12 @@ import ( "time" ) +type Stat struct { + FirstID uint64 + LastID uint64 + CommitID uint64 +} + type Replication struct { m sync.Mutex @@ -24,13 +29,11 @@ type Replication struct { quit chan struct{} wg sync.WaitGroup + + nc chan struct{} } func NewReplication(cfg *config.Config) (*Replication, error) { - if !cfg.Replication.Use { - return nil, fmt.Errorf("replication not enalbed") - } - if len(cfg.Replication.Path) == 0 { cfg.Replication.Path = path.Join(cfg.DataDir, "rpl") } @@ -40,6 +43,7 @@ func NewReplication(cfg *config.Config) (*Replication, error) { r := new(Replication) r.quit = make(chan struct{}) + r.nc = make(chan struct{}) r.cfg = cfg @@ -105,9 +109,16 @@ func (r *Replication) Log(data []byte) (*Log, error) { return nil, err } + close(r.nc) + r.nc = make(chan struct{}) + return l, nil } +func (r *Replication) WaitLog() <-chan struct{} { + return r.nc +} + func (r *Replication) StoreLog(log *Log) error { return r.StoreLogs([]*Log{log}) } @@ -133,22 +144,6 @@ func (r *Replication) LastLogID() (uint64, error) { return id, err } -func (r *Replication) NextSyncID() (uint64, error) { - r.m.Lock() - defer r.m.Unlock() - - lastId, err := r.s.LastID() - if err != nil { - return 0, err - } - - if lastId > r.commitID { - return lastId + 1, nil - } else { - return r.commitID + 1, nil - } -} - func (r *Replication) LastCommitID() (uint64, error) { r.m.Lock() id := r.commitID @@ -160,6 +155,29 @@ func (r *Replication) UpdateCommitID(id uint64) error { r.m.Lock() defer r.m.Unlock() + return r.updateCommitID(id) +} + +func (r *Replication) Stat() (*Stat, error) { + r.m.Lock() + defer r.m.Unlock() + + s := &Stat{} + var err error + + if s.FirstID, err = r.s.FirstID(); err != nil { + return nil, err + } + + if s.LastID, err = r.s.LastID(); err != nil { + return nil, err + } + + 
s.CommitID = r.commitID + return s, nil +} + +func (r *Replication) updateCommitID(id uint64) error { if _, err := r.commitLog.Seek(0, os.SEEK_SET); err != nil { return err } @@ -189,7 +207,7 @@ func (r *Replication) GetLog(id uint64, log *Log) error { return r.s.GetLog(id, log) } -func (r *Replication) NextCommitLog(log *Log) error { +func (r *Replication) NextNeedCommitLog(log *Log) error { r.m.Lock() defer r.m.Unlock() @@ -206,6 +224,21 @@ func (r *Replication) NextCommitLog(log *Log) error { } +func (r *Replication) Clear() error { + return r.ClearWithCommitID(0) +} + +func (r *Replication) ClearWithCommitID(id uint64) error { + r.m.Lock() + defer r.m.Unlock() + + if err := r.s.Clear(); err != nil { + return err + } + + return r.updateCommitID(id) +} + func (r *Replication) onPurgeExpired() { r.wg.Add(1) defer r.wg.Done() diff --git a/rpl/rpl_test.go b/rpl/rpl_test.go index 596f3b2..06fcf7d 100644 --- a/rpl/rpl_test.go +++ b/rpl/rpl_test.go @@ -15,7 +15,6 @@ func TestReplication(t *testing.T) { defer os.RemoveAll(dir) c := new(config.Config) - c.Replication.Use = true c.Replication.Path = dir r, err := NewReplication(c) diff --git a/server/app.go b/server/app.go index edd65c8..74a0b23 100644 --- a/server/app.go +++ b/server/app.go @@ -7,6 +7,7 @@ import ( "net/http" "path" "strings" + "sync" ) type App struct { @@ -29,6 +30,10 @@ type App struct { info *info s *script + + // handle slaves + slock sync.Mutex + slaves map[*client]struct{} } func netType(s string) string { @@ -53,6 +58,8 @@ func NewApp(cfg *config.Config) (*App, error) { app.cfg = cfg + app.slaves = make(map[*client]struct{}) + var err error if app.info, err = newInfo(app); err != nil { @@ -89,6 +96,8 @@ func NewApp(cfg *config.Config) (*App, error) { app.openScript() + app.ldb.AddNewLogEventHandler(app.publishNewLog) + return app, nil } diff --git a/server/client.go b/server/client.go index 83b14f6..5bceaef 100644 --- a/server/client.go +++ b/server/client.go @@ -12,9 +12,6 @@ var scriptUnsupportedCmds = map[string]struct{}{ "slaveof": struct{}{}, "fullsync": struct{}{}, "sync": struct{}{}, - "begin": struct{}{}, - "commit": struct{}{}, - "rollback": struct{}{}, "flushall": struct{}{}, "flushdb": struct{}{}, } @@ -32,6 +29,11 @@ type responseWriter interface { flush() } +type syncAck struct { + id uint64 + ch chan uint64 +} + type client struct { app *App ldb *ledis.Ledis @@ -47,6 +49,10 @@ type client struct { syncBuf bytes.Buffer compressBuf []byte + lastSyncLogID uint64 + + ack *syncAck + reqErr chan error buf bytes.Buffer diff --git a/server/client_resp.go b/server/client_resp.go index e8fb1ff..5d9604c 100644 --- a/server/client_resp.go +++ b/server/client_resp.go @@ -53,10 +53,7 @@ func (c *respClient) run() { c.conn.Close() } - if c.tx != nil { - c.tx.Rollback() - c.tx = nil - } + c.app.removeSlave(c.client) }() for { diff --git a/server/cmd_replication.go b/server/cmd_replication.go index 5fb9fdd..8447941 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -1,7 +1,6 @@ package server import ( - "encoding/binary" "fmt" "github.com/siddontang/go-snappy/snappy" "github.com/siddontang/ledisdb/ledis" @@ -66,44 +65,36 @@ func fullsyncCommand(c *client) error { return nil } -var reserveInfoSpace = make([]byte, 16) - func syncCommand(c *client) error { args := c.args if len(args) != 1 { return ErrCmdParams } - var logIndex int64 - var logPos int64 + var logId uint64 var err error - logIndex, err = ledis.Str(args[0], nil) - if err != nil { + + if logId, err = ledis.StrUint64(args[0], nil); err != nil 
{ return ErrCmdParams } - logPos, err = ledis.StrInt64(args[1], nil) - if err != nil { - return ErrCmdParams + c.lastSyncLogID = logId - 1 + + if c.ack != nil && logId > c.ack.id { + select { + case c.ack.ch <- logId: + default: + } + c.ack = nil } c.syncBuf.Reset() - //reserve space to write binlog anchor - if _, err := c.syncBuf.Write(reserveInfoSpace); err != nil { - return err - } - - m := &ledis.BinLogAnchor{logIndex, logPos} - - if _, err := c.app.ldb.ReadEventsToTimeout(m, &c.syncBuf, 5); err != nil { + if _, _, err := c.app.ldb.ReadLogsToTimeout(logId, &c.syncBuf, 30); err != nil { return err } else { buf := c.syncBuf.Bytes() - binary.BigEndian.PutUint64(buf[0:], uint64(m.LogFileIndex)) - binary.BigEndian.PutUint64(buf[8:], uint64(m.LogPos)) - if len(c.compressBuf) < snappy.MaxEncodedLen(len(buf)) { c.compressBuf = make([]byte, snappy.MaxEncodedLen(len(buf))) } @@ -115,6 +106,8 @@ func syncCommand(c *client) error { c.resp.writeBulk(buf) } + c.app.addSlave(c) + return nil } diff --git a/server/cmd_replication_test.go b/server/cmd_replication_test.go index 5b3030c..fee81fa 100644 --- a/server/cmd_replication_test.go +++ b/server/cmd_replication_test.go @@ -17,7 +17,7 @@ func checkDataEqual(master *App, slave *App) error { skeys, _ := sdb.Scan(nil, 100, true, "") if len(mkeys) != len(skeys) { - return fmt.Errorf("keys number not equal") + return fmt.Errorf("keys number not equal %d != %d", len(mkeys), len(skeys)) } else if !reflect.DeepEqual(mkeys, skeys) { return fmt.Errorf("keys not equal") } else { @@ -40,7 +40,9 @@ func TestReplication(t *testing.T) { masterCfg := new(config.Config) masterCfg.DataDir = fmt.Sprintf("%s/master", data_dir) masterCfg.Addr = "127.0.0.1:11182" - masterCfg.UseBinLog = true + masterCfg.UseReplication = true + masterCfg.Replication.Sync = true + masterCfg.Replication.WaitSyncTime = 5 var master *App var slave *App @@ -55,6 +57,7 @@ func TestReplication(t *testing.T) { slaveCfg.DataDir = fmt.Sprintf("%s/slave", data_dir) slaveCfg.Addr = "127.0.0.1:11183" slaveCfg.SlaveOf = masterCfg.Addr + slaveCfg.UseReplication = true slave, err = NewApp(slaveCfg) if err != nil { @@ -64,6 +67,9 @@ func TestReplication(t *testing.T) { go master.Run() + time.Sleep(1 * time.Second) + go slave.Run() + db, _ := master.ldb.Select(0) value := make([]byte, 10) @@ -73,10 +79,7 @@ func TestReplication(t *testing.T) { db.Set([]byte("c"), value) db.Set([]byte("d"), value) - go slave.Run() - time.Sleep(1 * time.Second) - if err = checkDataEqual(master, slave); err != nil { t.Fatal(err) } @@ -86,7 +89,9 @@ func TestReplication(t *testing.T) { db.Set([]byte("c1"), value) db.Set([]byte("d1"), value) - time.Sleep(1 * time.Second) + //time.Sleep(1 * time.Second) + slave.ldb.WaitReplication() + if err = checkDataEqual(master, slave); err != nil { t.Fatal(err) } @@ -108,6 +113,7 @@ func TestReplication(t *testing.T) { } slave.slaveof(masterCfg.Addr) + time.Sleep(1 * time.Second) if err = checkDataEqual(master, slave); err != nil { diff --git a/server/doc.go b/server/doc.go index d893a22..7dc47ff 100644 --- a/server/doc.go +++ b/server/doc.go @@ -24,7 +24,8 @@ // ledis-cli -p 6381 // ledis 127.0.0.1:6381 > slaveof 127.0.0.1 6380 // -// After you send slaveof command, the slave will start to sync master's binlog and replicate from binlog. +// After you send slaveof command, the slave will start to sync master's write ahead log and replicate from it. +// You must notice that use_replication must be set true if you want to use it. 
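// For illustration only (this note is not part of the original doc.go): the slave
// drives replication with the two commands added in this patch set. It first sends
// fullsync to receive a dump of the master, then repeatedly sends sync with the next
// write ahead log id it needs, using the wire format defined in server/replication.go
// (logIDStr below is the decimal log id, as in master.sync):
//
//	fullSyncCmd := []byte("*1\r\n$8\r\nfullsync\r\n")
//	syncCmd := fmt.Sprintf("*2\r\n$4\r\nsync\r\n$%d\r\n%s\r\n", len(logIDStr), logIDStr)
//
// The master replies to sync with a snappy compressed batch of logs, which the slave
// stores through StoreLogsFromData and applies asynchronously in its replication loop.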
// // HTTP Interface // diff --git a/server/replication.go b/server/replication.go index 445a813..f05c676 100644 --- a/server/replication.go +++ b/server/replication.go @@ -3,14 +3,12 @@ package server import ( "bufio" "bytes" - "encoding/binary" - "encoding/json" "errors" "fmt" "github.com/siddontang/go-log/log" "github.com/siddontang/go-snappy/snappy" "github.com/siddontang/ledisdb/ledis" - "io/ioutil" + "github.com/siddontang/ledisdb/rpl" "net" "os" "path" @@ -23,52 +21,6 @@ var ( errConnectMaster = errors.New("connect master error") ) -type MasterInfo struct { - Addr string `json:"addr"` - LogFileIndex int64 `json:"log_file_index"` - LogPos int64 `json:"log_pos"` -} - -func (m *MasterInfo) Save(filePath string) error { - data, err := json.Marshal(m) - if err != nil { - return err - } - - filePathBak := fmt.Sprintf("%s.bak", filePath) - - var fd *os.File - fd, err = os.OpenFile(filePathBak, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return err - } - - if _, err = fd.Write(data); err != nil { - fd.Close() - return err - } - - fd.Close() - return os.Rename(filePathBak, filePath) -} - -func (m *MasterInfo) Load(filePath string) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - if os.IsNotExist(err) { - return nil - } else { - return err - } - } - - if err = json.Unmarshal(data, m); err != nil { - return err - } - - return nil -} - type master struct { sync.Mutex @@ -79,9 +31,7 @@ type master struct { quit chan struct{} - infoName string - - info *MasterInfo + addr string wg sync.WaitGroup @@ -94,17 +44,10 @@ func newMaster(app *App) *master { m := new(master) m.app = app - m.infoName = path.Join(m.app.cfg.DataDir, "master.info") - m.quit = make(chan struct{}, 1) m.compressBuf = make([]byte, 256) - m.info = new(MasterInfo) - - //if load error, we will start a fullsync later - m.loadInfo() - return m } @@ -122,16 +65,8 @@ func (m *master) Close() { m.wg.Wait() } -func (m *master) loadInfo() error { - return m.info.Load(m.infoName) -} - -func (m *master) saveInfo() error { - return m.info.Save(m.infoName) -} - func (m *master) connect() error { - if len(m.info.Addr) == 0 { + if len(m.addr) == 0 { return fmt.Errorf("no assign master addr") } @@ -140,7 +75,7 @@ func (m *master) connect() error { m.conn = nil } - if conn, err := net.Dial("tcp", m.info.Addr); err != nil { + if conn, err := net.Dial("tcp", m.addr); err != nil { return err } else { m.conn = conn @@ -150,19 +85,10 @@ func (m *master) connect() error { return nil } -func (m *master) resetInfo(addr string) { - m.info.Addr = addr - m.info.LogFileIndex = 0 - m.info.LogPos = 0 -} - func (m *master) stopReplication() error { m.Close() - if err := m.saveInfo(); err != nil { - log.Error("save master info error %s", err.Error()) - return err - } + m.app.ldb.SetReadOnly(false) return nil } @@ -171,16 +97,12 @@ func (m *master) startReplication(masterAddr string) error { //stop last replcation, if avaliable m.Close() - if masterAddr != m.info.Addr { - m.resetInfo(masterAddr) - if err := m.saveInfo(); err != nil { - log.Error("save master info error %s", err.Error()) - return err - } - } + m.addr = masterAddr m.quit = make(chan struct{}, 1) + m.app.ldb.SetReadOnly(true) + go m.runReplication() return nil } @@ -195,29 +117,12 @@ func (m *master) runReplication() { return default: if err := m.connect(); err != nil { - log.Error("connect master %s error %s, try 2s later", m.info.Addr, err.Error()) + log.Error("connect master %s error %s, try 2s later", m.addr, err.Error()) time.Sleep(2 * time.Second) continue } } - if 
m.info.LogFileIndex == 0 { - //try a fullsync - if err := m.fullSync(); err != nil { - if m.conn != nil { - //if conn == nil, other close the replication, not error - log.Warn("full sync error %s", err.Error()) - } - return - } - - if m.info.LogFileIndex == 0 { - //master not support binlog, we cannot sync, so stop replication - m.stopReplication() - return - } - } - for { if err := m.sync(); err != nil { if m.conn != nil { @@ -240,11 +145,13 @@ func (m *master) runReplication() { } var ( - fullSyncCmd = []byte("*1\r\n$8\r\nfullsync\r\n") //fullsync - syncCmdFormat = "*3\r\n$4\r\nsync\r\n$%d\r\n%s\r\n$%d\r\n%s\r\n" //sync index pos + fullSyncCmd = []byte("*1\r\n$8\r\nfullsync\r\n") //fullsync + syncCmdFormat = "*2\r\n$4\r\nsync\r\n$%d\r\n%s\r\n" //sync logid ) func (m *master) fullSync() error { + log.Info("begin full sync") + if _, err := m.conn.Write(fullSyncCmd); err != nil { return err } @@ -264,30 +171,25 @@ func (m *master) fullSync() error { return err } - if err = m.app.ldb.FlushAll(); err != nil { - return err - } - - var head *ledis.BinLogAnchor - head, err = m.app.ldb.LoadDumpFile(dumpPath) - - if err != nil { + if _, err = m.app.ldb.LoadDumpFile(dumpPath); err != nil { log.Error("load dump file error %s", err.Error()) return err } - m.info.LogFileIndex = head.LogFileIndex - m.info.LogPos = head.LogPos - - return m.saveInfo() + return nil } func (m *master) sync() error { - logIndexStr := strconv.FormatInt(m.info.LogFileIndex, 10) - logPosStr := strconv.FormatInt(m.info.LogPos, 10) + var err error + var syncID uint64 + if syncID, err = m.app.ldb.NextSyncLogID(); err != nil { + return err + } - cmd := ledis.Slice(fmt.Sprintf(syncCmdFormat, len(logIndexStr), - logIndexStr, len(logPosStr), logPosStr)) + logIDStr := strconv.FormatUint(syncID, 10) + + cmd := ledis.Slice(fmt.Sprintf(syncCmdFormat, len(logIDStr), + logIDStr)) if _, err := m.conn.Write(cmd); err != nil { return err @@ -295,9 +197,16 @@ func (m *master) sync() error { m.syncBuf.Reset() - err := ReadBulkTo(m.rb, &m.syncBuf) - if err != nil { - return err + if err = ReadBulkTo(m.rb, &m.syncBuf); err != nil { + switch err.Error() { + case ledis.ErrLogMissed.Error(): + return m.fullSync() + case ledis.ErrRplNotSupport.Error(): + m.stopReplication() + return nil + default: + return err + } } var buf []byte @@ -308,28 +217,15 @@ func (m *master) sync() error { m.compressBuf = buf } - if len(buf) < 16 { - return fmt.Errorf("invalid sync data len %d", len(buf)) - } - - m.info.LogFileIndex = int64(binary.BigEndian.Uint64(buf[0:8])) - m.info.LogPos = int64(binary.BigEndian.Uint64(buf[8:16])) - - if m.info.LogFileIndex == 0 { - //master now not support binlog, stop replication - m.stopReplication() + if len(buf) == 0 { return nil - } else if m.info.LogFileIndex == -1 { - //-1 means than binlog index and pos are lost, we must start a full sync instead - return m.fullSync() } - err = m.app.ldb.ReplicateFromData(buf[16:]) - if err != nil { + if err = m.app.ldb.StoreLogsFromData(buf); err != nil { return err } - return m.saveInfo() + return nil } @@ -337,6 +233,10 @@ func (app *App) slaveof(masterAddr string) error { app.m.Lock() defer app.m.Unlock() + if !app.ldb.ReplicationUsed() { + return fmt.Errorf("slaveof must enable replication") + } + if len(masterAddr) == 0 { return app.m.stopReplication() } else { @@ -345,3 +245,75 @@ func (app *App) slaveof(masterAddr string) error { return nil } + +func (app *App) addSlave(c *client) { + app.slock.Lock() + defer app.slock.Unlock() + + app.slaves[c] = struct{}{} +} + +func (app *App) 
removeSlave(c *client) { + app.slock.Lock() + defer app.slock.Unlock() + + delete(app.slaves, c) + + if c.ack != nil { + select { + case c.ack.ch <- c.lastSyncLogID: + default: + } + } +} + +func (app *App) publishNewLog(l *rpl.Log) { + if !app.cfg.Replication.Sync { + //no sync replication, we will do async + return + } + + ss := make([]*client, 0, 4) + app.slock.Lock() + + logId := l.ID + for s, _ := range app.slaves { + if s.lastSyncLogID >= logId { + //slave has already this log + ss = []*client{} + break + } else { + ss = append(ss, s) + } + } + + app.slock.Unlock() + + if len(ss) == 0 { + return + } + + ack := &syncAck{ + logId, make(chan uint64, len(ss)), + } + + for _, s := range ss { + s.ack = ack + } + + done := make(chan struct{}, 1) + go func() { + for i := 0; i < len(ss); i++ { + id := <-ack.ch + if id > logId { + break + } + } + done <- struct{}{} + }() + + select { + case <-done: + case <-time.After(time.Duration(app.cfg.Replication.WaitSyncTime) * time.Second): + } +} From 4bb886adffe8a573d0a7b8ee54e79c8f3289ffcb Mon Sep 17 00:00:00 2001 From: siddontang Date: Tue, 23 Sep 2014 17:53:52 +0800 Subject: [PATCH 12/42] update info --- ledis/replication.go | 8 ++++++++ server/client.go | 2 +- server/cmd_replication.go | 2 +- server/info.go | 14 ++++++++++++++ server/replication.go | 4 ++-- 5 files changed, 26 insertions(+), 4 deletions(-) diff --git a/ledis/replication.go b/ledis/replication.go index 4937952..ee219a6 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -216,3 +216,11 @@ func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error { return nil } + +func (l *Ledis) ReplicationStat() (*rpl.Stat, error) { + if !l.ReplicationUsed() { + return nil, ErrRplNotSupport + } + + return l.r.Stat() +} diff --git a/server/client.go b/server/client.go index 5bceaef..4474086 100644 --- a/server/client.go +++ b/server/client.go @@ -49,7 +49,7 @@ type client struct { syncBuf bytes.Buffer compressBuf []byte - lastSyncLogID uint64 + lastLogID uint64 ack *syncAck diff --git a/server/cmd_replication.go b/server/cmd_replication.go index 8447941..36ee248 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -78,7 +78,7 @@ func syncCommand(c *client) error { return ErrCmdParams } - c.lastSyncLogID = logId - 1 + c.lastLogID = logId - 1 if c.ack != nil && logId > c.ack.id { select { diff --git a/server/info.go b/server/info.go index cae6a3f..0680c20 100644 --- a/server/info.go +++ b/server/info.go @@ -81,6 +81,8 @@ func (i *info) Dump(section string) []byte { i.dumpPersistence(buf) case "goroutine": i.dumpGoroutine(buf) + case "replication": + i.dumpReplication(buf) default: buf.WriteString(fmt.Sprintf("# %s\r\n", section)) } @@ -103,6 +105,8 @@ func (i *info) dumpAll(buf *bytes.Buffer) { i.dumpMem(buf) buf.Write(Delims) i.dumpGoroutine(buf) + buf.Write(Delims) + i.dumpReplication(buf) } func (i *info) dumpServer(buf *bytes.Buffer) { @@ -142,6 +146,16 @@ func (i *info) dumpPersistence(buf *bytes.Buffer) { i.dumpPairs(buf, infoPair{"db_name", i.Persistence.DBName}) } +func (i *info) dumpReplication(buf *bytes.Buffer) { + buf.WriteString("# Replication\r\n") + + p := []infoPair{} + for s, _ := range i.app.slaves { + p = append(p, infoPair{"slave", s.remoteAddr}) + } + i.dumpPairs(buf, p...) 
+} + func (i *info) dumpPairs(buf *bytes.Buffer, pairs ...infoPair) { for _, v := range pairs { buf.WriteString(fmt.Sprintf("%s:%v\r\n", v.Key, v.Value)) diff --git a/server/replication.go b/server/replication.go index f05c676..6ccbced 100644 --- a/server/replication.go +++ b/server/replication.go @@ -261,7 +261,7 @@ func (app *App) removeSlave(c *client) { if c.ack != nil { select { - case c.ack.ch <- c.lastSyncLogID: + case c.ack.ch <- c.lastLogID: default: } } @@ -278,7 +278,7 @@ func (app *App) publishNewLog(l *rpl.Log) { logId := l.ID for s, _ := range app.slaves { - if s.lastSyncLogID >= logId { + if s.lastLogID >= logId { //slave has already this log ss = []*client{} break From eb77615b4b4918aa2c177b4df3439c2c980c4b66 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 24 Sep 2014 09:44:42 +0800 Subject: [PATCH 13/42] rename file --- server/cmd_server.go | 106 +++++++++++++++++++++++++++++++++++++++++++ server/command.go | 87 ----------------------------------- 2 files changed, 106 insertions(+), 87 deletions(-) create mode 100644 server/cmd_server.go diff --git a/server/cmd_server.go b/server/cmd_server.go new file mode 100644 index 0000000..953c70e --- /dev/null +++ b/server/cmd_server.go @@ -0,0 +1,106 @@ +package server + +import ( + "github.com/siddontang/ledisdb/ledis" + "strconv" + "strings" +) + +func pingCommand(c *client) error { + c.resp.writeStatus(PONG) + return nil +} + +func echoCommand(c *client) error { + if len(c.args) != 1 { + return ErrCmdParams + } + + c.resp.writeBulk(c.args[0]) + return nil +} + +func selectCommand(c *client) error { + if len(c.args) != 1 { + return ErrCmdParams + } + + if index, err := strconv.Atoi(ledis.String(c.args[0])); err != nil { + return err + } else { + if c.db.IsInMulti() { + if err := c.script.Select(index); err != nil { + return err + } else { + c.db = c.script.DB + } + } else { + if db, err := c.ldb.Select(index); err != nil { + return err + } else { + c.db = db + } + } + c.resp.writeStatus(OK) + } + + return nil +} + +func infoCommand(c *client) error { + if len(c.args) > 1 { + return ErrCmdParams + } + var section string + if len(c.args) == 1 { + section = strings.ToLower(ledis.String(c.args[0])) + } + + buf := c.app.info.Dump(section) + c.resp.writeBulk(buf) + + return nil +} + +func flushallCommand(c *client) error { + err := c.ldb.FlushAll() + if err != nil { + return err + } + + c.resp.writeStatus(OK) + return nil +} + +func flushdbCommand(c *client) error { + _, err := c.db.FlushAll() + if err != nil { + return err + } + + c.resp.writeStatus(OK) + return nil +} + +func readonlyCommand(c *client) error { + if len(c.args) != 1 { + return ErrCmdParams + } + + if flag, err := strconv.Atoi(ledis.String(c.args[0])); err != nil { + return err + } else { + c.app.ldb.SetReadOnly(flag != 0) + c.resp.writeStatus(OK) + } + return nil +} + +func init() { + register("ping", pingCommand) + register("echo", echoCommand) + register("select", selectCommand) + register("info", infoCommand) + register("flushall", flushallCommand) + register("flushdb", flushdbCommand) +} diff --git a/server/command.go b/server/command.go index 0c66542..1c54c90 100644 --- a/server/command.go +++ b/server/command.go @@ -2,8 +2,6 @@ package server import ( "fmt" - "github.com/siddontang/ledisdb/ledis" - "strconv" "strings" ) @@ -18,88 +16,3 @@ func register(name string, f CommandFunc) { regCmds[name] = f } - -func pingCommand(c *client) error { - c.resp.writeStatus(PONG) - return nil -} - -func echoCommand(c *client) error { - if len(c.args) != 1 { - return 
ErrCmdParams - } - - c.resp.writeBulk(c.args[0]) - return nil -} - -func selectCommand(c *client) error { - if len(c.args) != 1 { - return ErrCmdParams - } - - if index, err := strconv.Atoi(ledis.String(c.args[0])); err != nil { - return err - } else { - if c.db.IsInMulti() { - if err := c.script.Select(index); err != nil { - return err - } else { - c.db = c.script.DB - } - } else { - if db, err := c.ldb.Select(index); err != nil { - return err - } else { - c.db = db - } - } - c.resp.writeStatus(OK) - } - - return nil -} - -func infoCommand(c *client) error { - if len(c.args) > 1 { - return ErrSyntax - } - var section string - if len(c.args) == 1 { - section = strings.ToLower(ledis.String(c.args[0])) - } - - buf := c.app.info.Dump(section) - c.resp.writeBulk(buf) - - return nil -} - -func flushallCommand(c *client) error { - err := c.ldb.FlushAll() - if err != nil { - return err - } - - c.resp.writeStatus(OK) - return nil -} - -func flushdbCommand(c *client) error { - _, err := c.db.FlushAll() - if err != nil { - return err - } - - c.resp.writeStatus(OK) - return nil -} - -func init() { - register("ping", pingCommand) - register("echo", echoCommand) - register("select", selectCommand) - register("info", infoCommand) - register("flushall", flushallCommand) - register("flushdb", flushdbCommand) -} From 7d1f13f9d7879f011614b896e67d1410dec12006 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 24 Sep 2014 09:46:36 +0800 Subject: [PATCH 14/42] rename package --- bootstrap.sh | 7 +++---- ledis/batch.go | 2 +- ledis/dump.go | 2 +- ledis/ledis.go | 2 +- ledis/replication.go | 2 +- ledis/util.go | 4 ++++ rpl/file_store.go | 2 +- rpl/rpl.go | 2 +- server/accesslog.go | 2 +- server/client_http.go | 4 ++-- server/client_resp.go | 2 +- server/cmd_replication.go | 2 +- server/info.go | 15 ++++++++++++++- server/replication.go | 4 ++-- 14 files changed, 34 insertions(+), 18 deletions(-) diff --git a/bootstrap.sh b/bootstrap.sh index a93c219..4a12258 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -2,9 +2,6 @@ . 
./dev.sh -go get github.com/siddontang/go-log/log -go get github.com/siddontang/go-snappy/snappy - go get github.com/siddontang/goleveldb/leveldb go get github.com/szferi/gomdb @@ -14,6 +11,8 @@ go get github.com/boltdb/bolt go get github.com/ugorji/go/codec go get github.com/BurntSushi/toml -go get github.com/siddontang/go-bson/bson +go get github.com/siddontang/go/bson +go get github.com/siddontang/go/log +go get github.com/siddontang/go/snappy go get github.com/siddontang/go/num diff --git a/ledis/batch.go b/ledis/batch.go index 0d7f2d6..4ed3bac 100644 --- a/ledis/batch.go +++ b/ledis/batch.go @@ -1,7 +1,7 @@ package ledis import ( - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/rpl" "github.com/siddontang/ledisdb/store" "sync" diff --git a/ledis/dump.go b/ledis/dump.go index bf40571..436e707 100644 --- a/ledis/dump.go +++ b/ledis/dump.go @@ -4,7 +4,7 @@ import ( "bufio" "bytes" "encoding/binary" - "github.com/siddontang/go-snappy/snappy" + "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/store" "io" "os" diff --git a/ledis/ledis.go b/ledis/ledis.go index 2660701..2669c3a 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -2,7 +2,7 @@ package ledis import ( "fmt" - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/rpl" "github.com/siddontang/ledisdb/store" diff --git a/ledis/replication.go b/ledis/replication.go index ee219a6..e3002a7 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -3,7 +3,7 @@ package ledis import ( "bytes" "errors" - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/rpl" "io" "time" diff --git a/ledis/util.go b/ledis/util.go index b245f64..49e78a9 100644 --- a/ledis/util.go +++ b/ledis/util.go @@ -110,6 +110,10 @@ func StrPutInt64(v int64) []byte { return strconv.AppendInt(nil, v, 10) } +func StrPutUint64(v uint64) []byte { + return strconv.AppendUint(nil, v, 10) +} + func MinUInt32(a uint32, b uint32) uint32 { if a > b { return b diff --git a/rpl/file_store.go b/rpl/file_store.go index 51ca293..df51b03 100644 --- a/rpl/file_store.go +++ b/rpl/file_store.go @@ -2,7 +2,7 @@ package rpl import ( "fmt" - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" "io/ioutil" "os" "path" diff --git a/rpl/rpl.go b/rpl/rpl.go index 8227371..f7324c8 100644 --- a/rpl/rpl.go +++ b/rpl/rpl.go @@ -2,7 +2,7 @@ package rpl import ( "encoding/binary" - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/config" "os" "path" diff --git a/server/accesslog.go b/server/accesslog.go index 9e517a8..4c41f1e 100644 --- a/server/accesslog.go +++ b/server/accesslog.go @@ -1,7 +1,7 @@ package server import ( - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" ) const ( diff --git a/server/client_http.go b/server/client_http.go index 28ce7d1..445790c 100644 --- a/server/client_http.go +++ b/server/client_http.go @@ -3,8 +3,8 @@ package server import ( "encoding/json" "fmt" - "github.com/siddontang/go-bson/bson" - "github.com/siddontang/go-log/log" + "github.com/siddontang/go/bson" + "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/ledis" "github.com/ugorji/go/codec" "io" diff --git a/server/client_resp.go b/server/client_resp.go index 5d9604c..1651c88 100644 --- a/server/client_resp.go +++ b/server/client_resp.go @@ -3,7 +3,7 @@ package server import ( "bufio" "errors" - 
"github.com/siddontang/go-log/log" + "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/ledis" "io" "net" diff --git a/server/cmd_replication.go b/server/cmd_replication.go index 36ee248..100244c 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -2,7 +2,7 @@ package server import ( "fmt" - "github.com/siddontang/go-snappy/snappy" + "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/ledis" "io/ioutil" "os" diff --git a/server/info.go b/server/info.go index 0680c20..119c6d3 100644 --- a/server/info.go +++ b/server/info.go @@ -150,9 +150,22 @@ func (i *info) dumpReplication(buf *bytes.Buffer) { buf.WriteString("# Replication\r\n") p := []infoPair{} + slaves := make([]string, 0, len(i.app.slaves)) for s, _ := range i.app.slaves { - p = append(p, infoPair{"slave", s.remoteAddr}) + slaves = append(slaves, s.remoteAddr) } + + p = append(p, infoPair{"readonly", i.app.ldb.IsReadOnly()}) + + if len(slaves) > 0 { + p = append(p, infoPair{"slave", strings.Join(slaves, ",")}) + } + + s, _ := i.app.ldb.ReplicationStat() + p = append(p, infoPair{"last_log_id", s.LastID}) + p = append(p, infoPair{"first_log_id", s.FirstID}) + p = append(p, infoPair{"commit_log_id", s.CommitID}) + i.dumpPairs(buf, p...) } diff --git a/server/replication.go b/server/replication.go index 6ccbced..2bbc1c2 100644 --- a/server/replication.go +++ b/server/replication.go @@ -5,8 +5,8 @@ import ( "bytes" "errors" "fmt" - "github.com/siddontang/go-log/log" - "github.com/siddontang/go-snappy/snappy" + "github.com/siddontang/go/log" + "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/rpl" "net" From 5bccfd13dc3549be6cdd75bcdff91fffd76bd1f9 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 24 Sep 2014 12:34:21 +0800 Subject: [PATCH 15/42] use tiny go package --- ledis/event.go | 37 ++++++++++++++------------- ledis/t_bit.go | 7 ++--- ledis/t_set.go | 15 ++++++----- ledis/t_ttl_test.go | 11 ++++---- ledis/t_zset.go | 13 +++++----- ledis/util.go | 54 ++++----------------------------------- server/client_http.go | 11 ++++---- server/client_resp.go | 27 ++++++++++---------- server/cmd_bit.go | 4 ++- server/cmd_kv.go | 15 ++++++----- server/cmd_replication.go | 8 +++--- server/cmd_script.go | 17 ++++++------ server/cmd_server.go | 8 +++--- server/cmd_zset.go | 32 ++++++++++++----------- server/replication.go | 4 ++- server/script.go | 21 +++++++-------- server/util.go | 4 +-- 17 files changed, 131 insertions(+), 157 deletions(-) diff --git a/ledis/event.go b/ledis/event.go index 6421766..72ac373 100644 --- a/ledis/event.go +++ b/ledis/event.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "errors" "fmt" + "github.com/siddontang/go/hack" "io" "strconv" ) @@ -114,27 +115,27 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { if key, err := db.decodeKVKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } case HashType: if key, field, err := db.hDecodeHashKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, String(field)) + buf = strconv.AppendQuote(buf, hack.String(field)) } case HSizeType: if key, err := db.hDecodeSizeKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } case ListType: if key, seq, err := 
db.lDecodeListKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') buf = strconv.AppendInt(buf, int64(seq), 10) } @@ -142,29 +143,29 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { if key, err := db.lDecodeMetaKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } case ZSetType: if key, m, err := db.zDecodeSetKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, String(m)) + buf = strconv.AppendQuote(buf, hack.String(m)) } case ZSizeType: if key, err := db.zDecodeSizeKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } case ZScoreType: if key, m, score, err := db.zDecodeScoreKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, String(m)) + buf = strconv.AppendQuote(buf, hack.String(m)) buf = append(buf, ' ') buf = strconv.AppendInt(buf, score, 10) } @@ -172,7 +173,7 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { if key, seq, err := db.bDecodeBinKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') buf = strconv.AppendUint(buf, uint64(seq), 10) } @@ -180,21 +181,21 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { if key, err := db.bDecodeMetaKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } case SetType: if key, member, err := db.sDecodeSetKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, String(member)) + buf = strconv.AppendQuote(buf, hack.String(member)) } case SSizeType: if key, err := db.sDecodeSizeKey(k); err != nil { return nil, err } else { - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } case ExpTimeType: if tp, key, t, err := db.expDecodeTimeKey(k); err != nil { @@ -202,7 +203,7 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { } else { buf = append(buf, TypeName[tp]...) buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) buf = append(buf, ' ') buf = strconv.AppendInt(buf, t, 10) } @@ -212,7 +213,7 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { } else { buf = append(buf, TypeName[tp]...) 
buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, String(key)) + buf = strconv.AppendQuote(buf, hack.String(key)) } default: return nil, errInvalidEvent diff --git a/ledis/t_bit.go b/ledis/t_bit.go index 496c37a..ab104db 100644 --- a/ledis/t_bit.go +++ b/ledis/t_bit.go @@ -3,6 +3,7 @@ package ledis import ( "encoding/binary" "errors" + "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/store" "sort" "time" @@ -239,8 +240,8 @@ func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq } else if tseq < 0 { update = true } else { - tailSeq = uint32(MaxInt32(tseq, 0)) - tailOff = uint32(MaxInt32(toff, 0)) + tailSeq = uint32(num.MaxInt32(tseq, 0)) + tailOff = uint32(num.MaxInt32(toff, 0)) update = (seq > tailSeq || (seq == tailSeq && off > tailOff)) } @@ -461,7 +462,7 @@ func (db *DB) BGet(key []byte) (data []byte, err error) { } s = seq << segByteWidth - e = MinUInt32(s+segByteSize, capByteSize) + e = num.MinUint32(s+segByteSize, capByteSize) copy(data[s:e], it.RawValue()) } it.Close() diff --git a/ledis/t_set.go b/ledis/t_set.go index 330462a..7f41f53 100644 --- a/ledis/t_set.go +++ b/ledis/t_set.go @@ -3,6 +3,7 @@ package ledis import ( "encoding/binary" "errors" + "github.com/siddontang/go/hack" "github.com/siddontang/ledisdb/store" "time" ) @@ -240,7 +241,7 @@ func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { } for _, m := range members { - destMap[String(m)] = true + destMap[hack.String(m)] = true } for _, k := range keys[1:] { @@ -250,10 +251,10 @@ func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { } for _, m := range members { - if _, ok := destMap[String(m)]; !ok { + if _, ok := destMap[hack.String(m)]; !ok { continue } else if ok { - delete(destMap, String(m)) + delete(destMap, hack.String(m)) } } // O - A = O, O is zero set. 
@@ -294,7 +295,7 @@ func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { } for _, m := range members { - destMap[String(m)] = true + destMap[hack.String(m)] = true } for _, key := range keys[1:] { @@ -314,8 +315,8 @@ func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { if err := checkKeySize(member); err != nil { return nil, err } - if _, ok := destMap[String(member)]; ok { - tempMap[String(member)] = true //mark this item as selected + if _, ok := destMap[hack.String(member)]; ok { + tempMap[hack.String(member)] = true //mark this item as selected } } destMap = tempMap //reduce the size of the result set @@ -439,7 +440,7 @@ func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) { } for _, member := range members { - dstMap[String(member)] = true + dstMap[hack.String(member)] = true } } diff --git a/ledis/t_ttl_test.go b/ledis/t_ttl_test.go index abfe2bf..b041e41 100644 --- a/ledis/t_ttl_test.go +++ b/ledis/t_ttl_test.go @@ -2,6 +2,7 @@ package ledis import ( "fmt" + "github.com/siddontang/go/hack" "sync" "testing" "time" @@ -49,7 +50,7 @@ func listAdaptor(db *DB) *adaptor { adp.set = func(k []byte, v []byte) (int64, error) { eles := make([][]byte, 0) for i := 0; i < 3; i++ { - e := []byte(String(v) + fmt.Sprintf("_%d", i)) + e := []byte(hack.String(v) + fmt.Sprintf("_%d", i)) eles = append(eles, e) } @@ -87,8 +88,8 @@ func hashAdaptor(db *DB) *adaptor { for i := 0; i < 3; i++ { suffix := fmt.Sprintf("_%d", i) pair := FVPair{ - Field: []byte(String(k) + suffix), - Value: []byte(String(v) + suffix)} + Field: []byte(hack.String(k) + suffix), + Value: []byte(hack.String(v) + suffix)} datas = append(datas, pair) } @@ -125,7 +126,7 @@ func zsetAdaptor(db *DB) *adaptor { adp.set = func(k []byte, v []byte) (int64, error) { datas := make([]ScorePair, 0) for i := 0; i < 3; i++ { - memb := []byte(String(k) + fmt.Sprintf("_%d", i)) + memb := []byte(hack.String(k) + fmt.Sprintf("_%d", i)) pair := ScorePair{ Score: int64(i), Member: memb} @@ -165,7 +166,7 @@ func setAdaptor(db *DB) *adaptor { adp.set = func(k []byte, v []byte) (int64, error) { eles := make([][]byte, 0) for i := 0; i < 3; i++ { - e := []byte(String(v) + fmt.Sprintf("_%d", i)) + e := []byte(hack.String(v) + fmt.Sprintf("_%d", i)) eles = append(eles, e) } diff --git a/ledis/t_zset.go b/ledis/t_zset.go index 50fc6aa..d7cb2b1 100644 --- a/ledis/t_zset.go +++ b/ledis/t_zset.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "errors" + "github.com/siddontang/go/hack" "github.com/siddontang/ledisdb/store" "time" ) @@ -833,10 +834,10 @@ func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, agg return 0, err } for _, pair := range scorePairs { - if score, ok := destMap[String(pair.Member)]; !ok { - destMap[String(pair.Member)] = pair.Score * weights[i] + if score, ok := destMap[hack.String(pair.Member)]; !ok { + destMap[hack.String(pair.Member)] = pair.Score * weights[i] } else { - destMap[String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i]) + destMap[hack.String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i]) } } } @@ -893,7 +894,7 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg return 0, err } for _, pair := range scorePairs { - destMap[String(pair.Member)] = pair.Score * weights[0] + destMap[hack.String(pair.Member)] = pair.Score * weights[0] } for i, key := range srcKeys[1:] { @@ -903,8 +904,8 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg } tmpMap := map[string]int64{} for _, pair := range 
scorePairs { - if score, ok := destMap[String(pair.Member)]; ok { - tmpMap[String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i+1]) + if score, ok := destMap[hack.String(pair.Member)]; ok { + tmpMap[hack.String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i+1]) } } destMap = tmpMap diff --git a/ledis/util.go b/ledis/util.go index 49e78a9..9454532 100644 --- a/ledis/util.go +++ b/ledis/util.go @@ -3,6 +3,7 @@ package ledis import ( "encoding/binary" "errors" + "github.com/siddontang/go/hack" "reflect" "strconv" "unsafe" @@ -10,27 +11,6 @@ import ( var errIntNumber = errors.New("invalid integer") -// no copy to change slice to string -// use your own risk -func String(b []byte) (s string) { - pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) - pstring.Data = pbytes.Data - pstring.Len = pbytes.Len - return -} - -// no copy to change string to slice -// use your own risk -func Slice(s string) (b []byte) { - pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) - pbytes.Data = pstring.Data - pbytes.Len = pstring.Len - pbytes.Cap = pstring.Len - return -} - func Int64(v []byte, err error) (int64, error) { if err != nil { return 0, err @@ -70,7 +50,7 @@ func StrInt64(v []byte, err error) (int64, error) { } else if v == nil { return 0, nil } else { - return strconv.ParseInt(String(v), 10, 64) + return strconv.ParseInt(hack.String(v), 10, 64) } } @@ -80,7 +60,7 @@ func StrUint64(v []byte, err error) (uint64, error) { } else if v == nil { return 0, nil } else { - return strconv.ParseUint(String(v), 10, 64) + return strconv.ParseUint(hack.String(v), 10, 64) } } @@ -90,7 +70,7 @@ func StrInt32(v []byte, err error) (int32, error) { } else if v == nil { return 0, nil } else { - res, err := strconv.ParseInt(String(v), 10, 32) + res, err := strconv.ParseInt(hack.String(v), 10, 32) return int32(res), err } } @@ -101,7 +81,7 @@ func StrInt8(v []byte, err error) (int8, error) { } else if v == nil { return 0, nil } else { - res, err := strconv.ParseInt(String(v), 10, 8) + res, err := strconv.ParseInt(hack.String(v), 10, 8) return int8(res), err } } @@ -114,30 +94,6 @@ func StrPutUint64(v uint64) []byte { return strconv.AppendUint(nil, v, 10) } -func MinUInt32(a uint32, b uint32) uint32 { - if a > b { - return b - } else { - return a - } -} - -func MaxUInt32(a uint32, b uint32) uint32 { - if a > b { - return a - } else { - return b - } -} - -func MaxInt32(a int32, b int32) int32 { - if a > b { - return a - } else { - return b - } -} - func AsyncNotify(ch chan struct{}) { select { case ch <- struct{}{}: diff --git a/server/client_http.go b/server/client_http.go index 445790c..115e44b 100644 --- a/server/client_http.go +++ b/server/client_http.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "github.com/siddontang/go/bson" + "github.com/siddontang/go/hack" "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/ledis" "github.com/ugorji/go/codec" @@ -151,7 +152,7 @@ func (w *httpWriter) writeBulk(b []byte) { if b == nil { w.genericWrite(nil) } else { - w.genericWrite(ledis.String(b)) + w.genericWrite(hack.String(b)) } } @@ -165,7 +166,7 @@ func (w *httpWriter) writeSliceArray(lst [][]byte) { if elem == nil { arr[i] = nil } else { - arr[i] = ledis.String(elem) + arr[i] = hack.String(elem) } } w.genericWrite(arr) @@ -174,7 +175,7 @@ func (w *httpWriter) writeSliceArray(lst [][]byte) { func (w *httpWriter) writeFVPairArray(lst []ledis.FVPair) { m := 
make(map[string]string) for _, elem := range lst { - m[ledis.String(elem.Field)] = ledis.String(elem.Value) + m[hack.String(elem.Field)] = hack.String(elem.Value) } w.genericWrite(m) } @@ -184,13 +185,13 @@ func (w *httpWriter) writeScorePairArray(lst []ledis.ScorePair, withScores bool) if withScores { arr = make([]string, 2*len(lst)) for i, data := range lst { - arr[2*i] = ledis.String(data.Member) + arr[2*i] = hack.String(data.Member) arr[2*i+1] = strconv.FormatInt(data.Score, 10) } } else { arr = make([]string, len(lst)) for i, data := range lst { - arr[i] = ledis.String(data.Member) + arr[i] = hack.String(data.Member) } } w.genericWrite(arr) diff --git a/server/client_resp.go b/server/client_resp.go index 1651c88..ed72108 100644 --- a/server/client_resp.go +++ b/server/client_resp.go @@ -3,6 +3,7 @@ package server import ( "bufio" "errors" + "github.com/siddontang/go/hack" "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/ledis" "io" @@ -80,7 +81,7 @@ func (c *respClient) readRequest() ([][]byte, error) { } var nparams int - if nparams, err = strconv.Atoi(ledis.String(l[1:])); err != nil { + if nparams, err = strconv.Atoi(hack.String(l[1:])); err != nil { return nil, err } else if nparams <= 0 { return nil, errReadRequest @@ -97,7 +98,7 @@ func (c *respClient) readRequest() ([][]byte, error) { return nil, errReadRequest } else if l[0] == '$' { //handle resp string - if n, err = strconv.Atoi(ledis.String(l[1:])); err != nil { + if n, err = strconv.Atoi(hack.String(l[1:])); err != nil { return nil, err } else if n == -1 { req = append(req, nil) @@ -130,7 +131,7 @@ func (c *respClient) handleRequest(reqData [][]byte) { c.cmd = "" c.args = reqData[0:0] } else { - c.cmd = strings.ToLower(ledis.String(reqData[0])) + c.cmd = strings.ToLower(hack.String(reqData[0])) c.args = reqData[1:] } if c.cmd == "quit" { @@ -154,17 +155,17 @@ func newWriterRESP(conn net.Conn) *respWriter { } func (w *respWriter) writeError(err error) { - w.buff.Write(ledis.Slice("-ERR")) + w.buff.Write(hack.Slice("-ERR")) if err != nil { w.buff.WriteByte(' ') - w.buff.Write(ledis.Slice(err.Error())) + w.buff.Write(hack.Slice(err.Error())) } w.buff.Write(Delims) } func (w *respWriter) writeStatus(status string) { w.buff.WriteByte('+') - w.buff.Write(ledis.Slice(status)) + w.buff.Write(hack.Slice(status)) w.buff.Write(Delims) } @@ -179,7 +180,7 @@ func (w *respWriter) writeBulk(b []byte) { if b == nil { w.buff.Write(NullBulk) } else { - w.buff.Write(ledis.Slice(strconv.Itoa(len(b)))) + w.buff.Write(hack.Slice(strconv.Itoa(len(b)))) w.buff.Write(Delims) w.buff.Write(b) } @@ -193,7 +194,7 @@ func (w *respWriter) writeArray(lst []interface{}) { w.buff.Write(NullArray) w.buff.Write(Delims) } else { - w.buff.Write(ledis.Slice(strconv.Itoa(len(lst)))) + w.buff.Write(hack.Slice(strconv.Itoa(len(lst)))) w.buff.Write(Delims) for i := 0; i < len(lst); i++ { @@ -221,7 +222,7 @@ func (w *respWriter) writeSliceArray(lst [][]byte) { w.buff.Write(NullArray) w.buff.Write(Delims) } else { - w.buff.Write(ledis.Slice(strconv.Itoa(len(lst)))) + w.buff.Write(hack.Slice(strconv.Itoa(len(lst)))) w.buff.Write(Delims) for i := 0; i < len(lst); i++ { @@ -236,7 +237,7 @@ func (w *respWriter) writeFVPairArray(lst []ledis.FVPair) { w.buff.Write(NullArray) w.buff.Write(Delims) } else { - w.buff.Write(ledis.Slice(strconv.Itoa(len(lst) * 2))) + w.buff.Write(hack.Slice(strconv.Itoa(len(lst) * 2))) w.buff.Write(Delims) for i := 0; i < len(lst); i++ { @@ -253,10 +254,10 @@ func (w *respWriter) writeScorePairArray(lst []ledis.ScorePair, withScores 
bool) w.buff.Write(Delims) } else { if withScores { - w.buff.Write(ledis.Slice(strconv.Itoa(len(lst) * 2))) + w.buff.Write(hack.Slice(strconv.Itoa(len(lst) * 2))) w.buff.Write(Delims) } else { - w.buff.Write(ledis.Slice(strconv.Itoa(len(lst)))) + w.buff.Write(hack.Slice(strconv.Itoa(len(lst)))) w.buff.Write(Delims) } @@ -273,7 +274,7 @@ func (w *respWriter) writeScorePairArray(lst []ledis.ScorePair, withScores bool) func (w *respWriter) writeBulkFrom(n int64, rb io.Reader) { w.buff.WriteByte('$') - w.buff.Write(ledis.Slice(strconv.FormatInt(n, 10))) + w.buff.Write(hack.Slice(strconv.FormatInt(n, 10))) w.buff.Write(Delims) io.Copy(w.buff, rb) diff --git a/server/cmd_bit.go b/server/cmd_bit.go index 1f83bdc..22d34fe 100644 --- a/server/cmd_bit.go +++ b/server/cmd_bit.go @@ -1,6 +1,8 @@ package server import ( + "github.com/siddontang/go/hack" + "github.com/siddontang/ledisdb/ledis" "strings" ) @@ -173,7 +175,7 @@ func boptCommand(c *client) error { return ErrCmdParams } - opDesc := strings.ToLower(ledis.String(args[0])) + opDesc := strings.ToLower(hack.String(args[0])) dstKey := args[1] srcKeys := args[2:] diff --git a/server/cmd_kv.go b/server/cmd_kv.go index c170601..f7a90d8 100644 --- a/server/cmd_kv.go +++ b/server/cmd_kv.go @@ -1,6 +1,7 @@ package server import ( + "github.com/siddontang/go/hack" "github.com/siddontang/ledisdb/ledis" "strconv" "strings" @@ -292,26 +293,26 @@ func parseScanArgs(c *client) (key []byte, match string, count int, err error) { } if len(args) == 3 { - switch strings.ToLower(ledis.String(args[1])) { + switch strings.ToLower(hack.String(args[1])) { case "match": - match = ledis.String(args[2]) + match = hack.String(args[2]) case "count": - count, err = strconv.Atoi(ledis.String(args[2])) + count, err = strconv.Atoi(hack.String(args[2])) default: err = ErrCmdParams return } } else if len(args) == 5 { - if strings.ToLower(ledis.String(args[1])) != "match" { + if strings.ToLower(hack.String(args[1])) != "match" { err = ErrCmdParams return - } else if strings.ToLower(ledis.String(args[3])) != "count" { + } else if strings.ToLower(hack.String(args[3])) != "count" { err = ErrCmdParams return } - match = ledis.String(args[2]) - count, err = strconv.Atoi(ledis.String(args[4])) + match = hack.String(args[2]) + count, err = strconv.Atoi(hack.String(args[4])) } if count <= 0 { diff --git a/server/cmd_replication.go b/server/cmd_replication.go index 100244c..1433143 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -2,7 +2,9 @@ package server import ( "fmt" + "github.com/siddontang/go/hack" "github.com/siddontang/go/snappy" + "github.com/siddontang/ledisdb/ledis" "io/ioutil" "os" @@ -19,11 +21,11 @@ func slaveofCommand(c *client) error { masterAddr := "" - if strings.ToLower(ledis.String(args[0])) == "no" && - strings.ToLower(ledis.String(args[1])) == "one" { + if strings.ToLower(hack.String(args[0])) == "no" && + strings.ToLower(hack.String(args[1])) == "one" { //stop replication, use master = "" } else { - if _, err := strconv.ParseInt(ledis.String(args[1]), 10, 16); err != nil { + if _, err := strconv.ParseInt(hack.String(args[1]), 10, 16); err != nil { return err } diff --git a/server/cmd_script.go b/server/cmd_script.go index e7d62a4..a35c153 100644 --- a/server/cmd_script.go +++ b/server/cmd_script.go @@ -6,7 +6,8 @@ import ( "crypto/sha1" "encoding/hex" "fmt" - "github.com/siddontang/ledisdb/ledis" + "github.com/siddontang/go/hack" + "github.com/siddontang/ledisdb/lua" "strconv" "strings" @@ -20,7 +21,7 @@ func parseEvalArgs(l *lua.State, c 
*client) error { args = args[1:] - n, err := strconv.Atoi(ledis.String(args[0])) + n, err := strconv.Atoi(hack.String(args[0])) if err != nil { return err } @@ -72,7 +73,7 @@ func evalGenericCommand(c *client, evalSha1 bool) error { h := sha1.Sum(c.args[0]) key = hex.EncodeToString(h[0:20]) } else { - key = strings.ToLower(ledis.String(c.args[0])) + key = strings.ToLower(hack.String(c.args[0])) } l.GetGlobal(key) @@ -84,7 +85,7 @@ func evalGenericCommand(c *client, evalSha1 bool) error { return fmt.Errorf("missing %s script", key) } - if r := l.LoadString(ledis.String(c.args[0])); r != 0 { + if r := l.LoadString(hack.String(c.args[0])); r != 0 { err := fmt.Errorf("%s", l.ToString(-1)) l.Pop(1) return err @@ -139,7 +140,7 @@ func scriptCommand(c *client) error { return ErrCmdParams } - switch strings.ToLower(ledis.String(args[0])) { + switch strings.ToLower(hack.String(args[0])) { case "load": return scriptLoadCommand(c) case "exists": @@ -164,7 +165,7 @@ func scriptLoadCommand(c *client) error { h := sha1.Sum(c.args[1]) key := hex.EncodeToString(h[0:20]) - if r := l.LoadString(ledis.String(c.args[1])); r != 0 { + if r := l.LoadString(hack.String(c.args[1])); r != 0 { err := fmt.Errorf("%s", l.ToString(-1)) l.Pop(1) return err @@ -175,7 +176,7 @@ func scriptLoadCommand(c *client) error { s.chunks[key] = struct{}{} } - c.resp.writeBulk(ledis.Slice(key)) + c.resp.writeBulk(hack.Slice(key)) return nil } @@ -188,7 +189,7 @@ func scriptExistsCommand(c *client) error { ay := make([]interface{}, len(c.args[1:])) for i, n := range c.args[1:] { - if _, ok := s.chunks[ledis.String(n)]; ok { + if _, ok := s.chunks[hack.String(n)]; ok { ay[i] = int64(1) } else { ay[i] = int64(0) diff --git a/server/cmd_server.go b/server/cmd_server.go index 953c70e..1d7a1cb 100644 --- a/server/cmd_server.go +++ b/server/cmd_server.go @@ -1,7 +1,7 @@ package server import ( - "github.com/siddontang/ledisdb/ledis" + "github.com/siddontang/go/hack" "strconv" "strings" ) @@ -25,7 +25,7 @@ func selectCommand(c *client) error { return ErrCmdParams } - if index, err := strconv.Atoi(ledis.String(c.args[0])); err != nil { + if index, err := strconv.Atoi(hack.String(c.args[0])); err != nil { return err } else { if c.db.IsInMulti() { @@ -53,7 +53,7 @@ func infoCommand(c *client) error { } var section string if len(c.args) == 1 { - section = strings.ToLower(ledis.String(c.args[0])) + section = strings.ToLower(hack.String(c.args[0])) } buf := c.app.info.Dump(section) @@ -87,7 +87,7 @@ func readonlyCommand(c *client) error { return ErrCmdParams } - if flag, err := strconv.Atoi(ledis.String(c.args[0])); err != nil { + if flag, err := strconv.Atoi(hack.String(c.args[0])); err != nil { return err } else { c.app.ldb.SetReadOnly(flag != 0) diff --git a/server/cmd_zset.go b/server/cmd_zset.go index ee55faf..9c58f5b 100644 --- a/server/cmd_zset.go +++ b/server/cmd_zset.go @@ -2,6 +2,8 @@ package server import ( "errors" + "github.com/siddontang/go/hack" + "github.com/siddontang/ledisdb/ledis" "math" "strconv" @@ -117,7 +119,7 @@ func zincrbyCommand(c *client) error { } func zparseScoreRange(minBuf []byte, maxBuf []byte) (min int64, max int64, err error) { - if strings.ToLower(ledis.String(minBuf)) == "-inf" { + if strings.ToLower(hack.String(minBuf)) == "-inf" { min = math.MinInt64 } else { @@ -148,7 +150,7 @@ func zparseScoreRange(minBuf []byte, maxBuf []byte) (min int64, max int64, err e } } - if strings.ToLower(ledis.String(maxBuf)) == "+inf" { + if strings.ToLower(hack.String(maxBuf)) == "+inf" { max = math.MaxInt64 } else { var ropen 
= false @@ -289,11 +291,11 @@ func zremrangebyscoreCommand(c *client) error { } func zparseRange(c *client, a1 []byte, a2 []byte) (start int, stop int, err error) { - if start, err = strconv.Atoi(ledis.String(a1)); err != nil { + if start, err = strconv.Atoi(hack.String(a1)); err != nil { return } - if stop, err = strconv.Atoi(ledis.String(a2)); err != nil { + if stop, err = strconv.Atoi(hack.String(a2)); err != nil { return } @@ -320,7 +322,7 @@ func zrangeGeneric(c *client, reverse bool) error { if len(args) != 1 { return ErrCmdParams } - if strings.ToLower(ledis.String(args[0])) == "withscores" { + if strings.ToLower(hack.String(args[0])) == "withscores" { withScores = true } else { return ErrSyntax @@ -370,7 +372,7 @@ func zrangebyscoreGeneric(c *client, reverse bool) error { var withScores bool = false if len(args) > 0 { - if strings.ToLower(ledis.String(args[0])) == "withscores" { + if strings.ToLower(hack.String(args[0])) == "withscores" { withScores = true args = args[1:] } @@ -384,15 +386,15 @@ func zrangebyscoreGeneric(c *client, reverse bool) error { return ErrCmdParams } - if strings.ToLower(ledis.String(args[0])) != "limit" { + if strings.ToLower(hack.String(args[0])) != "limit" { return ErrSyntax } - if offset, err = strconv.Atoi(ledis.String(args[1])); err != nil { + if offset, err = strconv.Atoi(hack.String(args[1])); err != nil { return ErrValue } - if count, err = strconv.Atoi(ledis.String(args[2])); err != nil { + if count, err = strconv.Atoi(hack.String(args[2])); err != nil { return ErrValue } } @@ -523,7 +525,7 @@ func zpersistCommand(c *client) error { func zparseZsetoptStore(args [][]byte) (destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte, err error) { destKey = args[0] - nKeys, err := strconv.Atoi(ledis.String(args[1])) + nKeys, err := strconv.Atoi(hack.String(args[1])) if err != nil { err = ErrValue return @@ -542,7 +544,7 @@ func zparseZsetoptStore(args [][]byte) (destKey []byte, srcKeys [][]byte, weight var aggregateFlag = false for len(args) > 0 { - if strings.ToLower(ledis.String(args[0])) == "weights" { + if strings.ToLower(hack.String(args[0])) == "weights" { if weightsFlag { err = ErrSyntax return @@ -565,7 +567,7 @@ func zparseZsetoptStore(args [][]byte) (destKey []byte, srcKeys [][]byte, weight weightsFlag = true - } else if strings.ToLower(ledis.String(args[0])) == "aggregate" { + } else if strings.ToLower(hack.String(args[0])) == "aggregate" { if aggregateFlag { err = ErrSyntax return @@ -575,11 +577,11 @@ func zparseZsetoptStore(args [][]byte) (destKey []byte, srcKeys [][]byte, weight return } - if strings.ToLower(ledis.String(args[1])) == "sum" { + if strings.ToLower(hack.String(args[1])) == "sum" { aggregate = ledis.AggregateSum - } else if strings.ToLower(ledis.String(args[1])) == "min" { + } else if strings.ToLower(hack.String(args[1])) == "min" { aggregate = ledis.AggregateMin - } else if strings.ToLower(ledis.String(args[1])) == "max" { + } else if strings.ToLower(hack.String(args[1])) == "max" { aggregate = ledis.AggregateMax } else { err = ErrSyntax diff --git a/server/replication.go b/server/replication.go index 2bbc1c2..3616762 100644 --- a/server/replication.go +++ b/server/replication.go @@ -5,7 +5,9 @@ import ( "bytes" "errors" "fmt" + "github.com/siddontang/go/hack" "github.com/siddontang/go/log" + "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/rpl" @@ -188,7 +190,7 @@ func (m *master) sync() error { logIDStr := strconv.FormatUint(syncID, 10) - cmd := 
ledis.Slice(fmt.Sprintf(syncCmdFormat, len(logIDStr), + cmd := hack.Slice(fmt.Sprintf(syncCmdFormat, len(logIDStr), logIDStr)) if _, err := m.conn.Write(cmd); err != nil { diff --git a/server/script.go b/server/script.go index f8222c4..24566c3 100644 --- a/server/script.go +++ b/server/script.go @@ -5,6 +5,7 @@ package server import ( "encoding/hex" "fmt" + "github.com/siddontang/go/hack" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/lua" "io" @@ -38,7 +39,7 @@ func (w *luaWriter) writeBulk(b []byte) { if b == nil { w.l.PushBoolean(false) } else { - w.l.PushString(ledis.String(b)) + w.l.PushString(hack.String(b)) } } @@ -81,7 +82,7 @@ func (w *luaWriter) writeSliceArray(lst [][]byte) { w.l.CreateTable(len(lst), 0) for i, v := range lst { - w.l.PushString(ledis.String(v)) + w.l.PushString(hack.String(v)) w.l.RawSeti(-2, i+1) } } @@ -94,10 +95,10 @@ func (w *luaWriter) writeFVPairArray(lst []ledis.FVPair) { w.l.CreateTable(len(lst)*2, 0) for i, v := range lst { - w.l.PushString(ledis.String(v.Field)) + w.l.PushString(hack.String(v.Field)) w.l.RawSeti(-2, 2*i+1) - w.l.PushString(ledis.String(v.Value)) + w.l.PushString(hack.String(v.Value)) w.l.RawSeti(-2, 2*i+2) } } @@ -111,16 +112,16 @@ func (w *luaWriter) writeScorePairArray(lst []ledis.ScorePair, withScores bool) if withScores { w.l.CreateTable(len(lst)*2, 0) for i, v := range lst { - w.l.PushString(ledis.String(v.Member)) + w.l.PushString(hack.String(v.Member)) w.l.RawSeti(-2, 2*i+1) - w.l.PushString(ledis.String(ledis.StrPutInt64(v.Score))) + w.l.PushString(hack.String(ledis.StrPutInt64(v.Score))) w.l.RawSeti(-2, 2*i+2) } } else { w.l.CreateTable(len(lst), 0) for i, v := range lst { - w.l.PushString(ledis.String(v.Member)) + w.l.PushString(hack.String(v.Member)) w.l.RawSeti(-2, i+1) } } @@ -280,7 +281,7 @@ func luaSha1Hex(l *lua.State) int { } s := l.ToString(1) - s = hex.EncodeToString(ledis.Slice(s)) + s = hex.EncodeToString(hack.Slice(s)) l.PushString(s) return 1 @@ -333,7 +334,7 @@ func luaSetGlobalArray(l *lua.State, name string, ay [][]byte) { l.NewTable() for i := 0; i < len(ay); i++ { - l.PushString(ledis.String(ay[i])) + l.PushString(hack.String(ay[i])) l.RawSeti(-2, i+1) } @@ -348,7 +349,7 @@ func luaReplyToLedisReply(l *lua.State) interface{} { switch l.Type(-1) { case lua.LUA_TSTRING: - return ledis.Slice(l.ToString(-1)) + return hack.Slice(l.ToString(-1)) case lua.LUA_TBOOLEAN: if l.ToBoolean(-1) { return int64(1) diff --git a/server/util.go b/server/util.go index c015b60..abd6536 100644 --- a/server/util.go +++ b/server/util.go @@ -3,7 +3,7 @@ package server import ( "bufio" "errors" - "github.com/siddontang/ledisdb/ledis" + "github.com/siddontang/go/hack" "io" "strconv" ) @@ -36,7 +36,7 @@ func ReadBulkTo(rb *bufio.Reader, w io.Writer) error { } else if l[0] == '$' { var n int //handle resp string - if n, err = strconv.Atoi(ledis.String(l[1:])); err != nil { + if n, err = strconv.Atoi(hack.String(l[1:])); err != nil { return err } else if n == -1 { return nil From b7de9e6354d564b8b280e39b5310160f2eb022c7 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 24 Sep 2014 13:29:27 +0800 Subject: [PATCH 16/42] use tiny go package --- ledis/t_hash.go | 3 ++- ledis/t_kv.go | 3 ++- ledis/t_set.go | 6 +++--- ledis/t_zset.go | 12 ++++++------ ledis/util.go | 22 +++++++--------------- server/client_resp.go | 5 +++-- server/cmd_zset.go | 6 +++--- server/script.go | 3 ++- 8 files changed, 28 insertions(+), 32 deletions(-) diff --git a/ledis/t_hash.go b/ledis/t_hash.go index 952ddae..a2e0bd3 100644 --- 
a/ledis/t_hash.go +++ b/ledis/t_hash.go @@ -3,6 +3,7 @@ package ledis import ( "encoding/binary" "errors" + "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/store" "time" ) @@ -332,7 +333,7 @@ func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { n += delta - _, err = db.hSetItem(key, field, StrPutInt64(n)) + _, err = db.hSetItem(key, field, num.FormatInt64ToSlice(n)) if err != nil { return 0, err } diff --git a/ledis/t_kv.go b/ledis/t_kv.go index ce0874a..14d477b 100644 --- a/ledis/t_kv.go +++ b/ledis/t_kv.go @@ -2,6 +2,7 @@ package ledis import ( "errors" + "github.com/siddontang/go/num" "time" ) @@ -75,7 +76,7 @@ func (db *DB) incr(key []byte, delta int64) (int64, error) { n += delta - t.Put(key, StrPutInt64(n)) + t.Put(key, num.FormatInt64ToSlice(n)) err = t.Commit() return n, err diff --git a/ledis/t_set.go b/ledis/t_set.go index 7f41f53..2eb6c4c 100644 --- a/ledis/t_set.go +++ b/ledis/t_set.go @@ -509,14 +509,14 @@ func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, t.Put(ek, nil) } - var num = int64(len(v)) + var n = int64(len(v)) sk := db.sEncodeSizeKey(dstKey) - t.Put(sk, PutInt64(num)) + t.Put(sk, PutInt64(n)) if err = t.Commit(); err != nil { return 0, err } - return num, nil + return n, nil } func (db *DB) SClear(key []byte) (int64, error) { diff --git a/ledis/t_zset.go b/ledis/t_zset.go index d7cb2b1..3a2336f 100644 --- a/ledis/t_zset.go +++ b/ledis/t_zset.go @@ -858,14 +858,14 @@ func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, agg } } - var num = int64(len(destMap)) + var n = int64(len(destMap)) sk := db.zEncodeSizeKey(destKey) - t.Put(sk, PutInt64(num)) + t.Put(sk, PutInt64(n)) if err := t.Commit(); err != nil { return 0, err } - return num, nil + return n, nil } func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { @@ -926,14 +926,14 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg } } - var num int64 = int64(len(destMap)) + var n int64 = int64(len(destMap)) sk := db.zEncodeSizeKey(destKey) - t.Put(sk, PutInt64(num)) + t.Put(sk, PutInt64(n)) if err := t.Commit(); err != nil { return 0, err } - return num, nil + return n, nil } func (db *DB) ZScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { diff --git a/ledis/util.go b/ledis/util.go index 9454532..a0abdd0 100644 --- a/ledis/util.go +++ b/ledis/util.go @@ -4,13 +4,16 @@ import ( "encoding/binary" "errors" "github.com/siddontang/go/hack" - "reflect" "strconv" - "unsafe" ) var errIntNumber = errors.New("invalid integer") +/* + Below I forget why I use little endian to store int. + Maybe I was foolish at that time. 
+*/ + func Int64(v []byte, err error) (int64, error) { if err != nil { return 0, err @@ -36,11 +39,8 @@ func Uint64(v []byte, err error) (uint64, error) { } func PutInt64(v int64) []byte { - var b []byte - pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - pbytes.Data = uintptr(unsafe.Pointer(&v)) - pbytes.Len = 8 - pbytes.Cap = 8 + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, uint64(v)) return b } @@ -86,14 +86,6 @@ func StrInt8(v []byte, err error) (int8, error) { } } -func StrPutInt64(v int64) []byte { - return strconv.AppendInt(nil, v, 10) -} - -func StrPutUint64(v uint64) []byte { - return strconv.AppendUint(nil, v, 10) -} - func AsyncNotify(ch chan struct{}) { select { case ch <- struct{}{}: diff --git a/server/client_resp.go b/server/client_resp.go index ed72108..1b50bc3 100644 --- a/server/client_resp.go +++ b/server/client_resp.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/siddontang/go/hack" "github.com/siddontang/go/log" + "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/ledis" "io" "net" @@ -171,7 +172,7 @@ func (w *respWriter) writeStatus(status string) { func (w *respWriter) writeInteger(n int64) { w.buff.WriteByte(':') - w.buff.Write(ledis.StrPutInt64(n)) + w.buff.Write(num.FormatInt64ToSlice(n)) w.buff.Write(Delims) } @@ -266,7 +267,7 @@ func (w *respWriter) writeScorePairArray(lst []ledis.ScorePair, withScores bool) w.writeBulk(lst[i].Member) if withScores { - w.writeBulk(ledis.StrPutInt64(lst[i].Score)) + w.writeBulk(num.FormatInt64ToSlice(lst[i].Score)) } } } diff --git a/server/cmd_zset.go b/server/cmd_zset.go index 9c58f5b..4763683 100644 --- a/server/cmd_zset.go +++ b/server/cmd_zset.go @@ -3,7 +3,7 @@ package server import ( "errors" "github.com/siddontang/go/hack" - + "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/ledis" "math" "strconv" @@ -75,7 +75,7 @@ func zscoreCommand(c *client) error { return err } } else { - c.resp.writeBulk(ledis.StrPutInt64(s)) + c.resp.writeBulk(num.FormatInt64ToSlice(s)) } return nil @@ -112,7 +112,7 @@ func zincrbyCommand(c *client) error { v, err := c.db.ZIncrBy(key, delta, args[2]) if err == nil { - c.resp.writeBulk(ledis.StrPutInt64(v)) + c.resp.writeBulk(num.FormatInt64ToSlice(v)) } return err diff --git a/server/script.go b/server/script.go index 24566c3..711b412 100644 --- a/server/script.go +++ b/server/script.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" "github.com/siddontang/go/hack" + "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/lua" "io" @@ -115,7 +116,7 @@ func (w *luaWriter) writeScorePairArray(lst []ledis.ScorePair, withScores bool) w.l.PushString(hack.String(v.Member)) w.l.RawSeti(-2, 2*i+1) - w.l.PushString(hack.String(ledis.StrPutInt64(v.Score))) + w.l.PushString(hack.String(num.FormatInt64ToSlice(v.Score))) w.l.RawSeti(-2, 2*i+2) } } else { From 0a64b592d54515abd68803de95fa95a105831409 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 24 Sep 2014 15:51:09 +0800 Subject: [PATCH 17/42] add file lock to promise one instance for a path --- bootstrap.sh | 1 + ledis/ledis.go | 26 +++++++++++++++++++++----- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/bootstrap.sh b/bootstrap.sh index 4a12258..e6d4071 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -16,3 +16,4 @@ go get github.com/siddontang/go/bson go get github.com/siddontang/go/log go get github.com/siddontang/go/snappy go get github.com/siddontang/go/num +go get github.com/siddontang/go/filelock diff --git a/ledis/ledis.go b/ledis/ledis.go index 
2669c3a..27f2ad3 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -2,10 +2,14 @@ package ledis import ( "fmt" + "github.com/siddontang/go/filelock" "github.com/siddontang/go/log" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/rpl" "github.com/siddontang/ledisdb/store" + "io" + "os" + "path" "sync" "time" ) @@ -31,6 +35,8 @@ type Ledis struct { // for readonly mode, only replication can write readOnly bool + + lock io.Closer } func Open(cfg *config.Config) (*Ledis, error) { @@ -42,18 +48,23 @@ func Open2(cfg *config.Config, flags int) (*Ledis, error) { cfg.DataDir = config.DefaultDataDir } - ldb, err := store.Open(cfg) - if err != nil { - return nil, err - } + os.MkdirAll(cfg.DataDir, 0755) + + var err error l := new(Ledis) + if l.lock, err = filelock.Lock(path.Join(cfg.DataDir, "LOCK")); err != nil { + return nil, err + } + l.readOnly = (flags&ROnlyMode > 0) l.quit = make(chan struct{}) - l.ldb = ldb + if l.ldb, err = store.Open(cfg); err != nil { + return nil, err + } if cfg.UseReplication { if l.r, err = rpl.NewReplication(cfg); err != nil { @@ -91,6 +102,11 @@ func (l *Ledis) Close() { l.r.Close() l.r = nil } + + if l.lock != nil { + l.lock.Close() + l.lock = nil + } } func (l *Ledis) Select(index int) (*DB, error) { From 6d293ce152d4b301e029738cbd9375470dbd221d Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 24 Sep 2014 21:31:26 +0800 Subject: [PATCH 18/42] use async notify --- server/cmd_replication.go | 5 +---- server/replication.go | 17 +++++++++-------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/server/cmd_replication.go b/server/cmd_replication.go index 1433143..a261f42 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -83,10 +83,7 @@ func syncCommand(c *client) error { c.lastLogID = logId - 1 if c.ack != nil && logId > c.ack.id { - select { - case c.ack.ch <- logId: - default: - } + asyncNotifyUint64(c.ack.ch, logId) c.ack = nil } diff --git a/server/replication.go b/server/replication.go index 3616762..d90caab 100644 --- a/server/replication.go +++ b/server/replication.go @@ -54,10 +54,7 @@ func newMaster(app *App) *master { } func (m *master) Close() { - select { - case m.quit <- struct{}{}: - default: - } + ledis.AsyncNotify(m.quit) if m.conn != nil { m.conn.Close() @@ -262,10 +259,14 @@ func (app *App) removeSlave(c *client) { delete(app.slaves, c) if c.ack != nil { - select { - case c.ack.ch <- c.lastLogID: - default: - } + asyncNotifyUint64(c.ack.ch, c.lastLogID) + } +} + +func asyncNotifyUint64(ch chan uint64, v uint64) { + select { + case ch <- v: + default: } } From 595ead55bdf4917cae78f2a869e5f98333f9a855 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 25 Sep 2014 10:21:50 +0800 Subject: [PATCH 19/42] bug fix: stop replication before close --- ledis/replication.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ledis/replication.go b/ledis/replication.go index e3002a7..bf7fac8 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -50,6 +50,9 @@ func (l *Ledis) handleReplication() { } func (l *Ledis) onReplication() { + l.wg.Add(1) + defer l.wg.Done() + AsyncNotify(l.rc) for { @@ -58,6 +61,8 @@ func (l *Ledis) onReplication() { l.handleReplication() case <-time.After(5 * time.Second): l.handleReplication() + case <-l.quit: + return } } } From dca71891c3b6ffb1e734b13dbb16d039eb92cae9 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 25 Sep 2014 10:44:07 +0800 Subject: [PATCH 20/42] readd transaction --- client/ledis-py/ledis/client.py | 48 +++++- 
client/ledis-py/ledis/exceptions.py | 2 + client/ledis-py/tests/test_tx.py | 46 ++++++ client/nodejs/ledis/lib/commands.js | 4 + client/openresty/ledis.lua | 5 + cmd/ledis-cli/const.go | 5 +- doc/DiffRedis.md | 9 ++ doc/commands.json | 16 ++ doc/commands.md | 67 +++++++++ ledis/batch.go | 74 ++++++---- ledis/const.go | 5 +- ledis/ledis_db.go | 2 +- ledis/multi.go | 2 +- ledis/tx.go | 109 ++++++++++++++ ledis/tx_test.go | 220 ++++++++++++++++++++++++++++ server/client.go | 22 ++- server/client_http.go | 3 + server/cmd_tx.go | 57 +++++++ 18 files changed, 664 insertions(+), 32 deletions(-) create mode 100644 client/ledis-py/tests/test_tx.py create mode 100644 ledis/tx.go create mode 100644 ledis/tx_test.go create mode 100644 server/cmd_tx.go diff --git a/client/ledis-py/ledis/client.py b/client/ledis-py/ledis/client.py index 420900f..17cc1c4 100644 --- a/client/ledis-py/ledis/client.py +++ b/client/ledis-py/ledis/client.py @@ -9,7 +9,8 @@ from ledis.exceptions import ( ConnectionError, DataError, LedisError, - ResponseError + ResponseError, + TxNotBeginError ) SYM_EMPTY = b('') @@ -199,6 +200,11 @@ class Ledis(object): "Set a custom Response Callback" self.response_callbacks[command] = callback + def tx(self): + return Transaction( + self.connection_pool, + self.response_callbacks) + #### COMMAND EXECUTION AND PROTOCOL PARSING #### def execute_command(self, *args, **options): @@ -964,3 +970,43 @@ class Ledis(object): def scriptflush(self): return self.execute_command('SCRIPT', 'FLUSH') + + +class Transaction(Ledis): + def __init__(self, connection_pool, response_callbacks): + self.connection_pool = connection_pool + self.response_callbacks = response_callbacks + self.connection = None + + def execute_command(self, *args, **options): + "Execute a command and return a parsed response" + command_name = args[0] + + connection = self.connection + if self.connection is None: + raise TxNotBeginError + + try: + connection.send_command(*args) + return self.parse_response(connection, command_name, **options) + except ConnectionError: + connection.disconnect() + connection.send_command(*args) + return self.parse_response(connection, command_name, **options) + + def begin(self): + self.connection = self.connection_pool.get_connection('begin') + return self.execute_command("BEGIN") + + def commit(self): + res = self.execute_command("COMMIT") + self.connection_pool.release(self.connection) + self.connection = None + return res + + def rollback(self): + res = self.execute_command("ROLLBACK") + self.connection_pool.release(self.connection) + self.connection = None + return res + diff --git a/client/ledis-py/ledis/exceptions.py b/client/ledis-py/ledis/exceptions.py index f92e530..9150db6 100644 --- a/client/ledis-py/ledis/exceptions.py +++ b/client/ledis-py/ledis/exceptions.py @@ -35,3 +35,5 @@ class DataError(LedisError): class ExecAbortError(ResponseError): pass +class TxNotBeginError(LedisError): + pass \ No newline at end of file diff --git a/client/ledis-py/tests/test_tx.py b/client/ledis-py/tests/test_tx.py new file mode 100644 index 0000000..cfbab20 --- /dev/null +++ b/client/ledis-py/tests/test_tx.py @@ -0,0 +1,46 @@ +import unittest +import sys +sys.path.append("..") + +import ledis + +global_l = ledis.Ledis() + +#db that do not support transaction +dbs = ["leveldb", "rocksdb", "hyperleveldb", "goleveldb"] +check = global_l.info().get("db_name") in dbs + + +class TestTx(unittest.TestCase): + def setUp(self): + self.l = ledis.Ledis(port=6380) + + def tearDown(self): + self.l.flushdb() + + 
@unittest.skipIf(check, reason="db not support transaction") + def test_commit(self): + tx = self.l.tx() + self.l.set("a", "no-tx") + assert self.l.get("a") == "no-tx" + tx.begin() + tx.set("a", "tx") + assert self.l.get("a") == "no-tx" + assert tx.get("a") == "tx" + + tx.commit() + assert self.l.get("a") == "tx" + + @unittest.skipIf(check, reason="db not support transaction") + def test_rollback(self): + tx = self.l.tx() + self.l.set("a", "no-tx") + assert self.l.get("a") == "no-tx" + + tx.begin() + tx.set("a", "tx") + assert tx.get("a") == "tx" + assert self.l.get("a") == "no-tx" + + tx.rollback() + assert self.l.get("a") == "no-tx" \ No newline at end of file diff --git a/client/nodejs/ledis/lib/commands.js b/client/nodejs/ledis/lib/commands.js index 41dc97e..f116444 100644 --- a/client/nodejs/ledis/lib/commands.js +++ b/client/nodejs/ledis/lib/commands.js @@ -125,6 +125,10 @@ module.exports = [ "spersist", "sxscan", + "begin", + "rollback", + "commit", + "eval", "evalsha", "script", diff --git a/client/openresty/ledis.lua b/client/openresty/ledis.lua index 26a384a..07c3f2b 100644 --- a/client/openresty/ledis.lua +++ b/client/openresty/ledis.lua @@ -148,6 +148,11 @@ local commands = { "flushall", "flushdb", + -- [[transaction]] + "begin", + "commit", + "rollback", + -- [[script]] "eval", "evalsha", diff --git a/cmd/ledis-cli/const.go b/cmd/ledis-cli/const.go index 4fc7f67..842866b 100644 --- a/cmd/ledis-cli/const.go +++ b/cmd/ledis-cli/const.go @@ -1,9 +1,10 @@ -//This file was generated by .tools/generate_commands.py on Tue Sep 09 2014 09:48:57 +0800 +//This file was generated by .tools/generate_commands.py on Thu Sep 25 2014 09:51:10 +0800 package main var helpCommands = [][]string{ {"BCOUNT", "key [start end]", "Bitmap"}, {"BDELETE", "key", "ZSet"}, + {"BEGIN", "-", "Transaction"}, {"BEXPIRE", "key seconds", "Bitmap"}, {"BEXPIREAT", "key timestamp", "Bitmap"}, {"BGET", "key", "Bitmap"}, @@ -14,6 +15,7 @@ var helpCommands = [][]string{ {"BSETBIT", "key offset value", "Bitmap"}, {"BTTL", "key", "Bitmap"}, {"BXSCAN", "key [MATCH match] [COUNT count]", "Bitmap"}, + {"COMMIT", "-", "Transaction"}, {"DECR", "key", "KV"}, {"DECRBY", "key decrement", "KV"}, {"DEL", "key [key ...]", "KV"}, @@ -65,6 +67,7 @@ var helpCommands = [][]string{ {"MSET", "key value [key value ...]", "KV"}, {"PERSIST", "key", "KV"}, {"PING", "-", "Server"}, + {"ROLLBACK", "-", "Transaction"}, {"RPOP", "key", "List"}, {"RPUSH", "key value [value ...]", "List"}, {"SADD", "key member [member ...]", "Set"}, diff --git a/doc/DiffRedis.md b/doc/DiffRedis.md index 5c597fe..ee1618c 100644 --- a/doc/DiffRedis.md +++ b/doc/DiffRedis.md @@ -35,6 +35,15 @@ The same for Del. ZSet only support int64 score, not double in Redis. +## Transaction + +LedisDB supports ACID transaction using LMDB or BoltDB, maybe later it will support `multi`, `exec`, `discard`. 
+ +Transaction API: + ++ `begin` ++ `commit` ++ `rollback` ## Scan diff --git a/doc/commands.json b/doc/commands.json index 921a688..828186d 100644 --- a/doc/commands.json +++ b/doc/commands.json @@ -512,6 +512,22 @@ "readonly": false }, + "BEGIN": { + "arguments": "-", + "group": "Transaction", + "readonly": false + }, + "COMMIT": { + "arguments": "-", + "group": "Transaction", + "readonly": false + }, + "ROLLBACK": { + "arguments": "-", + "group": "Transaction", + "readonly": false + }, + "XSCAN": { "arguments": "key [MATCH match] [COUNT count]", "group": "KV", diff --git a/doc/commands.md b/doc/commands.md index 0317384..0809131 100644 --- a/doc/commands.md +++ b/doc/commands.md @@ -129,6 +129,10 @@ Table of Contents - [FLUSHALL](#flushall) - [FLUSHDB](#flushdb) - [INFO [section]](#info-section) +- [Transaction](#transaction) + - [BEGIN](#begin) + - [ROLLBACK](#rollback) + - [COMMIT](#commit) - [Script](#script) - [EVAL script numkeys key [key ...] arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-) - [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-) @@ -2498,6 +2502,69 @@ The optional parameter can be used to select a specific section of information: When no parameter is provided, all will return. +## Transaction + +### BEGIN + +Marks the start of a transaction block. Subsequent commands will be in a transaction context util using COMMIT or ROLLBACK. + +You must known that `BEGIN` will block any other write operators before you `COMMIT` or `ROLLBACK`. Don't use long-time transaction. + +**Return value** + +Returns `OK` if the backend store engine in use supports transaction, otherwise, returns `Err`. + +**Examples** +``` +ledis> BEGIN +OK +ledis> SET HELLO WORLD +OK +ledis> COMMIT +OK +``` + +### ROLLBACK + +Discards all the changes of previously commands in a transaction and restores the connection state to normal. + +**Return value** +Returns `OK` if in a transaction context, otherwise, `Err` + +**Examples** +``` +ledis> BEGIN +OK +ledis> SET HELLO WORLD +OK +ledis> GET HELLO +"WORLD" +ledis> ROLLBACK +OK +ledis> GET HELLO +(nil) +``` + +### COMMIT + +Persists the changes of all the commands in a transaction and restores the connection state to normal. 
+ +**Return value** +Returns `OK` if in a transaction context, otherwise, `Err` + +**Examples** +``` +ledis> BEGIN +OK +ledis> SET HELLO WORLD +OK +ledis> GET HELLO +"WORLD" +ledis> COMMIT +OK +ledis> GET HELLO +"WORLD" +``` ## Script diff --git a/ledis/batch.go b/ledis/batch.go index 4ed3bac..c77e91f 100644 --- a/ledis/batch.go +++ b/ledis/batch.go @@ -14,6 +14,8 @@ type batch struct { sync.Locker + tx *Tx + eb *eventBatch } @@ -22,31 +24,12 @@ func (b *batch) Commit() error { return ErrWriteInROnly } - b.l.commitLock.Lock() - defer b.l.commitLock.Unlock() - - var err error - if b.l.r != nil { - var l *rpl.Log - if l, err = b.l.r.Log(b.eb.Bytes()); err != nil { - log.Fatal("write wal error %s", err.Error()) - return err - } - - b.l.propagate(l) - - if err = b.WriteBatch.Commit(); err != nil { - log.Fatal("commit error %s", err.Error()) - return err - } - - if err = b.l.r.UpdateCommitID(l.ID); err != nil { - log.Fatal("update commit id error %s", err.Error()) - return err - } - - return nil + if b.tx == nil { + return b.l.handleCommit(b.eb, b.WriteBatch) } else { + if b.l.r != nil { + b.tx.eb.Write(b.eb.Bytes()) + } return b.WriteBatch.Commit() } } @@ -93,20 +76,61 @@ func (l *dbBatchLocker) Unlock() { l.wrLock.RUnlock() } +type txBatchLocker struct { +} + +func (l *txBatchLocker) Lock() {} +func (l *txBatchLocker) Unlock() {} + type multiBatchLocker struct { } func (l *multiBatchLocker) Lock() {} func (l *multiBatchLocker) Unlock() {} -func (l *Ledis) newBatch(wb store.WriteBatch, locker sync.Locker) *batch { +func (l *Ledis) newBatch(wb store.WriteBatch, locker sync.Locker, tx *Tx) *batch { b := new(batch) b.l = l b.WriteBatch = wb b.Locker = locker + b.tx = tx b.eb = new(eventBatch) return b } + +type commiter interface { + Commit() error +} + +func (l *Ledis) handleCommit(eb *eventBatch, c commiter) error { + l.commitLock.Lock() + defer l.commitLock.Unlock() + + var err error + if l.r != nil { + var rl *rpl.Log + if rl, err = l.r.Log(eb.Bytes()); err != nil { + log.Fatal("write wal error %s", err.Error()) + return err + } + + l.propagate(rl) + + if err = c.Commit(); err != nil { + log.Fatal("commit error %s", err.Error()) + return err + } + + if err = l.r.UpdateCommitID(rl.ID); err != nil { + log.Fatal("update commit id error %s", err.Error()) + return err + } + + return nil + } else { + return c.Commit() + } +} diff --git a/ledis/const.go b/ledis/const.go index 7144629..3b30123 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -92,6 +92,7 @@ var ( ) const ( - DBAutoCommit uint8 = 0x0 - DBInMulti uint8 = 0x2 + DBAutoCommit uint8 = 0x0 + DBInTransaction uint8 = 0x1 + DBInMulti uint8 = 0x2 ) diff --git a/ledis/ledis_db.go b/ledis/ledis_db.go index 70eaf5a..6a8eb9c 100644 --- a/ledis/ledis_db.go +++ b/ledis/ledis_db.go @@ -64,7 +64,7 @@ func (l *Ledis) newDB(index uint8) *DB { } func (db *DB) newBatch() *batch { - return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}) + return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}, nil) } func (db *DB) Index() int { diff --git a/ledis/multi.go b/ledis/multi.go index 0ae4727..a549c2c 100644 --- a/ledis/multi.go +++ b/ledis/multi.go @@ -51,7 +51,7 @@ func (db *DB) Multi() (*Multi, error) { } func (m *Multi) newBatch() *batch { - return m.l.newBatch(m.bucket.NewWriteBatch(), &multiBatchLocker{}) + return m.l.newBatch(m.bucket.NewWriteBatch(), &multiBatchLocker{}, nil) } func (m *Multi) Close() error { diff --git a/ledis/tx.go b/ledis/tx.go new file mode 
100644 index 0000000..a5ff883 --- /dev/null +++ b/ledis/tx.go @@ -0,0 +1,109 @@ +package ledis + +import ( + "errors" + "fmt" + "github.com/siddontang/ledisdb/store" +) + +var ( + ErrNestTx = errors.New("nest transaction not supported") + ErrTxDone = errors.New("Transaction has already been committed or rolled back") +) + +type Tx struct { + *DB + + tx *store.Tx + + eb *eventBatch +} + +func (db *DB) IsTransaction() bool { + return db.status == DBInTransaction +} + +// Begin a transaction, it will block all other write operations before calling Commit or Rollback. +// You must be very careful to prevent long-time transaction. +func (db *DB) Begin() (*Tx, error) { + if db.IsTransaction() { + return nil, ErrNestTx + } + + tx := new(Tx) + + tx.eb = new(eventBatch) + + tx.DB = new(DB) + tx.DB.l = db.l + + tx.l.wLock.Lock() + + tx.DB.sdb = db.sdb + + var err error + tx.tx, err = db.sdb.Begin() + if err != nil { + tx.l.wLock.Unlock() + return nil, err + } + + tx.DB.bucket = tx.tx + + tx.DB.status = DBInTransaction + + tx.DB.index = db.index + + tx.DB.kvBatch = tx.newBatch() + tx.DB.listBatch = tx.newBatch() + tx.DB.hashBatch = tx.newBatch() + tx.DB.zsetBatch = tx.newBatch() + tx.DB.binBatch = tx.newBatch() + tx.DB.setBatch = tx.newBatch() + + return tx, nil +} + +func (tx *Tx) Commit() error { + if tx.tx == nil { + return ErrTxDone + } + + err := tx.l.handleCommit(tx.eb, tx.tx) + + tx.tx = nil + + tx.l.wLock.Unlock() + + tx.DB.bucket = nil + + return err +} + +func (tx *Tx) Rollback() error { + if tx.tx == nil { + return ErrTxDone + } + + err := tx.tx.Rollback() + tx.eb.Reset() + tx.tx = nil + + tx.l.wLock.Unlock() + tx.DB.bucket = nil + + return err +} + +func (tx *Tx) newBatch() *batch { + return tx.l.newBatch(tx.tx.NewWriteBatch(), &txBatchLocker{}, tx) +} + +func (tx *Tx) Select(index int) error { + if index < 0 || index >= int(MaxDBNumber) { + return fmt.Errorf("invalid db index %d", index) + } + + tx.DB.index = uint8(index) + return nil +} diff --git a/ledis/tx_test.go b/ledis/tx_test.go new file mode 100644 index 0000000..cb3a7f0 --- /dev/null +++ b/ledis/tx_test.go @@ -0,0 +1,220 @@ +package ledis + +import ( + "github.com/siddontang/ledisdb/config" + "os" + "testing" +) + +func testTxRollback(t *testing.T, db *DB) { + var err error + key1 := []byte("tx_key1") + key2 := []byte("tx_key2") + field2 := []byte("tx_field2") + + err = db.Set(key1, []byte("value")) + if err != nil { + t.Fatal(err) + } + + _, err = db.HSet(key2, field2, []byte("value")) + if err != nil { + t.Fatal(err) + } + + var tx *Tx + tx, err = db.Begin() + if err != nil { + t.Fatal(err) + } + + defer tx.Rollback() + + err = tx.Set(key1, []byte("1")) + + if err != nil { + t.Fatal(err) + } + + _, err = tx.HSet(key2, field2, []byte("2")) + + if err != nil { + t.Fatal(err) + } + + _, err = tx.HSet([]byte("no_key"), field2, []byte("2")) + + if err != nil { + t.Fatal(err) + } + + if v, err := tx.Get(key1); err != nil { + t.Fatal(err) + } else if string(v) != "1" { + t.Fatal(string(v)) + } + + if v, err := tx.HGet(key2, field2); err != nil { + t.Fatal(err) + } else if string(v) != "2" { + t.Fatal(string(v)) + } + + err = tx.Rollback() + if err != nil { + t.Fatal(err) + } + + if v, err := db.Get(key1); err != nil { + t.Fatal(err) + } else if string(v) != "value" { + t.Fatal(string(v)) + } + + if v, err := db.HGet(key2, field2); err != nil { + t.Fatal(err) + } else if string(v) != "value" { + t.Fatal(string(v)) + } +} + +func testTxCommit(t *testing.T, db *DB) { + var err error + key1 := []byte("tx_key1") + key2 := 
[]byte("tx_key2") + field2 := []byte("tx_field2") + + err = db.Set(key1, []byte("value")) + if err != nil { + t.Fatal(err) + } + + _, err = db.HSet(key2, field2, []byte("value")) + if err != nil { + t.Fatal(err) + } + + var tx *Tx + tx, err = db.Begin() + if err != nil { + t.Fatal(err) + } + + defer tx.Rollback() + + err = tx.Set(key1, []byte("1")) + + if err != nil { + t.Fatal(err) + } + + _, err = tx.HSet(key2, field2, []byte("2")) + + if err != nil { + t.Fatal(err) + } + + if v, err := tx.Get(key1); err != nil { + t.Fatal(err) + } else if string(v) != "1" { + t.Fatal(string(v)) + } + + if v, err := tx.HGet(key2, field2); err != nil { + t.Fatal(err) + } else if string(v) != "2" { + t.Fatal(string(v)) + } + + err = tx.Commit() + if err != nil { + t.Fatal(err) + } + + if v, err := db.Get(key1); err != nil { + t.Fatal(err) + } else if string(v) != "1" { + t.Fatal(string(v)) + } + + if v, err := db.HGet(key2, field2); err != nil { + t.Fatal(err) + } else if string(v) != "2" { + t.Fatal(string(v)) + } +} + +func testTxSelect(t *testing.T, db *DB) { + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + + defer tx.Rollback() + + tx.Set([]byte("tx_select_1"), []byte("a")) + + tx.Select(1) + + tx.Set([]byte("tx_select_2"), []byte("b")) + + if err = tx.Commit(); err != nil { + t.Fatal(err) + } + + if v, err := db.Get([]byte("tx_select_1")); err != nil { + t.Fatal(err) + } else if string(v) != "a" { + t.Fatal(string(v)) + } + + if v, err := db.Get([]byte("tx_select_2")); err != nil { + t.Fatal(err) + } else if v != nil { + t.Fatal("must nil") + } + + db, _ = db.l.Select(1) + + if v, err := db.Get([]byte("tx_select_2")); err != nil { + t.Fatal(err) + } else if string(v) != "b" { + t.Fatal(string(v)) + } + + if v, err := db.Get([]byte("tx_select_1")); err != nil { + t.Fatal(err) + } else if v != nil { + t.Fatal("must nil") + } +} + +func testTx(t *testing.T, name string) { + cfg := new(config.Config) + cfg.DataDir = "/tmp/ledis_test_tx" + + cfg.DBName = name + cfg.LMDB.MapSize = 10 * 1024 * 1024 + cfg.UseReplication = true + + os.RemoveAll(cfg.DataDir) + + l, err := Open(cfg) + if err != nil { + t.Fatal(err) + } + + defer l.Close() + + db, _ := l.Select(0) + + testTxRollback(t, db) + testTxCommit(t, db) + testTxSelect(t, db) +} + +//only lmdb, boltdb support Transaction +func TestTx(t *testing.T) { + testTx(t, "lmdb") + testTx(t, "boltdb") +} diff --git a/server/client.go b/server/client.go index 4474086..2ccea7a 100644 --- a/server/client.go +++ b/server/client.go @@ -8,10 +8,24 @@ import ( "time" ) +var txUnsupportedCmds = map[string]struct{}{ + "select": struct{}{}, + "slaveof": struct{}{}, + "fullsync": struct{}{}, + "sync": struct{}{}, + "begin": struct{}{}, + "flushall": struct{}{}, + "flushdb": struct{}{}, + "eval": struct{}{}, +} + var scriptUnsupportedCmds = map[string]struct{}{ "slaveof": struct{}{}, "fullsync": struct{}{}, "sync": struct{}{}, + "begin": struct{}{}, + "commit": struct{}{}, + "rollback": struct{}{}, "flushall": struct{}{}, "flushdb": struct{}{}, } @@ -57,6 +71,8 @@ type client struct { buf bytes.Buffer + tx *ledis.Tx + script *ledis.Multi } @@ -83,7 +99,11 @@ func (c *client) perform() { } else if exeCmd, ok := regCmds[c.cmd]; !ok { err = ErrNotFound } else { - if c.db.IsInMulti() { + if c.db.IsTransaction() { + if _, ok := txUnsupportedCmds[c.cmd]; ok { + err = fmt.Errorf("%s not supported in transaction", c.cmd) + } + } else if c.db.IsInMulti() { if _, ok := scriptUnsupportedCmds[c.cmd]; ok { err = fmt.Errorf("%s not supported in multi", c.cmd) } diff --git 
a/server/client_http.go b/server/client_http.go index 115e44b..057ba6b 100644 --- a/server/client_http.go +++ b/server/client_http.go @@ -24,6 +24,9 @@ var httpUnsupportedCommands = map[string]struct{}{ "fullsync": struct{}{}, "sync": struct{}{}, "quit": struct{}{}, + "begin": struct{}{}, + "commit": struct{}{}, + "rollback": struct{}{}, } type httpClient struct { diff --git a/server/cmd_tx.go b/server/cmd_tx.go new file mode 100644 index 0000000..19eb5c1 --- /dev/null +++ b/server/cmd_tx.go @@ -0,0 +1,57 @@ +package server + +import ( + "errors" +) + +var errTxMiss = errors.New("transaction miss") + +func beginCommand(c *client) error { + tx, err := c.db.Begin() + if err == nil { + c.tx = tx + c.db = tx.DB + c.resp.writeStatus(OK) + } + + return err +} + +func commitCommand(c *client) error { + if c.tx == nil { + return errTxMiss + } + + err := c.tx.Commit() + c.db, _ = c.ldb.Select(c.tx.Index()) + c.tx = nil + + if err == nil { + c.resp.writeStatus(OK) + } + + return err +} + +func rollbackCommand(c *client) error { + if c.tx == nil { + return errTxMiss + } + + err := c.tx.Rollback() + + c.db, _ = c.ldb.Select(c.tx.Index()) + c.tx = nil + + if err == nil { + c.resp.writeStatus(OK) + } + + return err +} + +func init() { + register("begin", beginCommand) + register("commit", commitCommand) + register("rollback", rollbackCommand) +} From 962a0873300d88e12c76ca5d964fe6aa9f261f68 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 25 Sep 2014 16:03:29 +0800 Subject: [PATCH 21/42] update slaveof and readonly --- config/config.toml | 1 + ledis/ledis.go | 2 +- server/app.go | 8 +++++++- server/cmd_server.go | 14 -------------- server/replication.go | 10 ++++++---- 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/config/config.toml b/config/config.toml index ac0aab9..d3faf17 100644 --- a/config/config.toml +++ b/config/config.toml @@ -13,6 +13,7 @@ data_dir = "/tmp/ledis_server" access_log = "" # Set slaveof to enable replication from master, empty, no replication +# Any write operations except flushall and replication will be disabled in slave mode. 
slaveof = "" # Choose which backend storage to use, now support: diff --git a/ledis/ledis.go b/ledis/ledis.go index 27f2ad3..0052b3a 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -33,7 +33,7 @@ type Ledis struct { wLock sync.RWMutex //allow one write at same time commitLock sync.Mutex //allow one write commit at same time - // for readonly mode, only replication can write + // for readonly mode, only replication and flushall can write readOnly bool lock io.Closer diff --git a/server/app.go b/server/app.go index 74a0b23..62f8514 100644 --- a/server/app.go +++ b/server/app.go @@ -88,7 +88,13 @@ func NewApp(cfg *config.Config) (*App, error) { } } - if app.ldb, err = ledis.Open(cfg); err != nil { + flag := ledis.RDWRMode + if len(app.cfg.SlaveOf) > 0 { + //slave must readonly + flag = ledis.ROnlyMode + } + + if app.ldb, err = ledis.Open2(cfg, flag); err != nil { return nil, err } diff --git a/server/cmd_server.go b/server/cmd_server.go index 1d7a1cb..ab8051f 100644 --- a/server/cmd_server.go +++ b/server/cmd_server.go @@ -82,20 +82,6 @@ func flushdbCommand(c *client) error { return nil } -func readonlyCommand(c *client) error { - if len(c.args) != 1 { - return ErrCmdParams - } - - if flag, err := strconv.Atoi(hack.String(c.args[0])); err != nil { - return err - } else { - c.app.ldb.SetReadOnly(flag != 0) - c.resp.writeStatus(OK) - } - return nil -} - func init() { register("ping", pingCommand) register("echo", echoCommand) diff --git a/server/replication.go b/server/replication.go index d90caab..8d49f43 100644 --- a/server/replication.go +++ b/server/replication.go @@ -87,8 +87,6 @@ func (m *master) connect() error { func (m *master) stopReplication() error { m.Close() - m.app.ldb.SetReadOnly(false) - return nil } @@ -126,7 +124,7 @@ func (m *master) runReplication() { if err := m.sync(); err != nil { if m.conn != nil { //if conn == nil, other close the replication, not error - log.Warn("sync error %s", err.Error()) + log.Error("sync error %s", err.Error()) } return } @@ -237,7 +235,11 @@ func (app *App) slaveof(masterAddr string) error { } if len(masterAddr) == 0 { - return app.m.stopReplication() + if err := app.m.stopReplication(); err != nil { + return err + } + + app.ldb.SetReadOnly(false) } else { return app.m.startReplication(masterAddr) } From 05135a5ac99dc81c2b97149d4567c1fec27c8941 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 25 Sep 2014 22:33:09 +0800 Subject: [PATCH 22/42] move nextsynclogid to server --- ledis/replication.go | 17 ----------------- server/replication.go | 15 ++++++++++++++- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/ledis/replication.go b/ledis/replication.go index bf7fac8..02c3e4e 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -187,23 +187,6 @@ func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w io.Writer, timeout int) ( return l.ReadLogsTo(startLogID, w) } -func (l *Ledis) NextSyncLogID() (uint64, error) { - if !l.ReplicationUsed() { - return 0, ErrRplNotSupport - } - - s, err := l.r.Stat() - if err != nil { - return 0, err - } - - if s.LastID > s.CommitID { - return s.LastID + 1, nil - } else { - return s.CommitID + 1, nil - } -} - func (l *Ledis) propagate(rl *rpl.Log) { for _, h := range l.rhs { h(rl) diff --git a/server/replication.go b/server/replication.go index 8d49f43..3dd767e 100644 --- a/server/replication.go +++ b/server/replication.go @@ -176,10 +176,23 @@ func (m *master) fullSync() error { return nil } +func (m *master) nextSyncLogID() (uint64, error) { + s, err := m.app.ldb.ReplicationStat() + 
if err != nil { + return 0, err + } + + if s.LastID > s.CommitID { + return s.LastID + 1, nil + } else { + return s.CommitID + 1, nil + } +} + func (m *master) sync() error { var err error var syncID uint64 - if syncID, err = m.app.ldb.NextSyncLogID(); err != nil { + if syncID, err = m.nextSyncLogID(); err != nil { return err } From b2a8b70e54ed78da4b2d82f4a00dc23394f3da99 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sat, 27 Sep 2014 09:10:08 +0800 Subject: [PATCH 23/42] add compression for log --- config/config.go | 2 ++ config/config.toml | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index e2381a7..5a8070b 100644 --- a/config/config.go +++ b/config/config.go @@ -34,6 +34,7 @@ type ReplicationConfig struct { ExpiredLogDays int `toml:"expired_log_days"` Sync bool `toml:"sync"` WaitSyncTime int `toml:"wait_sync_time"` + Compression bool `toml:"compression"` } type Config struct { @@ -98,6 +99,7 @@ func NewConfigDefault() *Config { cfg.LMDB.NoSync = true cfg.Replication.WaitSyncTime = 1 + cfg.Replication.Compression = true return cfg } diff --git a/config/config.toml b/config/config.toml index d3faf17..fbe86a6 100644 --- a/config/config.toml +++ b/config/config.toml @@ -54,9 +54,11 @@ path = "" expired_log_days = 7 # If sync is true, the new log must be sent to some slaves, and then commit. -# It may affect performance. +# It will reduce performance but have better high availability. sync = true -# If sync is true, wait at last wait_sync_time seconds to check whether slave sync this log +# If sync is true, wait at last wait_sync_time seconds for slave syncing this log wait_sync_time = 1 +# Compress the log or not +compression = true From f49a3bbece00bc5968b39ca67738fb516da786d8 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sat, 27 Sep 2014 10:08:45 +0800 Subject: [PATCH 24/42] add compression for log --- ledis/replication.go | 12 ++++++++++++ ledis/replication_test.go | 1 + rpl/log.go | 23 ++++++++++------------- rpl/rpl.go | 15 +++++++++++++++ server/client.go | 4 +--- server/cmd_replication.go | 10 ---------- server/replication.go | 14 +------------- 7 files changed, 40 insertions(+), 39 deletions(-) diff --git a/ledis/replication.go b/ledis/replication.go index 02c3e4e..1fa1531 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "github.com/siddontang/go/log" + "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/rpl" "io" "time" @@ -37,12 +38,23 @@ func (l *Ledis) handleReplication() { } } else { l.rbatch.Rollback() + + if rl.Compression == 1 { + //todo optimize + if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil { + log.Error("decode log error %s", err.Error()) + return + } + } + decodeEventBatch(l.rbatch, rl.Data) if err := l.rbatch.Commit(); err != nil { log.Error("commit log error %s", err.Error()) + return } else if err = l.r.UpdateCommitID(rl.ID); err != nil { log.Error("update commit id error %s", err.Error()) + return } } diff --git a/ledis/replication_test.go b/ledis/replication_test.go index 6e277a9..c300ef8 100644 --- a/ledis/replication_test.go +++ b/ledis/replication_test.go @@ -34,6 +34,7 @@ func TestReplication(t *testing.T) { cfgM.DataDir = "/tmp/test_repl/master" cfgM.UseReplication = true + cfgM.Replication.Compression = true os.RemoveAll(cfgM.DataDir) diff --git a/rpl/log.go b/rpl/log.go index 775ea5d..261e852 100644 --- a/rpl/log.go +++ b/rpl/log.go @@ -4,27 +4,18 @@ import ( "bytes" "encoding/binary" "io" - "time" ) type Log 
struct { - ID uint64 - CreateTime uint32 + ID uint64 + CreateTime uint32 + Compression uint8 Data []byte } -func NewLog(id uint64, data []byte) *Log { - l := new(Log) - l.ID = id - l.CreateTime = uint32(time.Now().Unix()) - l.Data = data - - return l -} - func (l *Log) HeadSize() int { - return 16 + return 17 } func (l *Log) Size() int { @@ -58,6 +49,9 @@ func (l *Log) Encode(w io.Writer) error { binary.BigEndian.PutUint32(buf[pos:], l.CreateTime) pos += 4 + buf[pos] = l.Compression + pos++ + binary.BigEndian.PutUint32(buf[pos:], uint32(len(l.Data))) if n, err := w.Write(buf); err != nil { @@ -88,6 +82,9 @@ func (l *Log) Decode(r io.Reader) error { l.CreateTime = binary.BigEndian.Uint32(buf[pos:]) pos += 4 + l.Compression = uint8(buf[pos]) + pos++ + length := binary.BigEndian.Uint32(buf[pos:]) l.Data = l.Data[0:0] diff --git a/rpl/rpl.go b/rpl/rpl.go index f7324c8..3eaad9a 100644 --- a/rpl/rpl.go +++ b/rpl/rpl.go @@ -3,6 +3,7 @@ package rpl import ( "encoding/binary" "github.com/siddontang/go/log" + "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/config" "os" "path" @@ -86,6 +87,14 @@ func (r *Replication) Close() error { } func (r *Replication) Log(data []byte) (*Log, error) { + if r.cfg.Replication.Compression { + //todo optimize + var err error + if data, err = snappy.Encode(nil, data); err != nil { + return nil, err + } + } + r.m.Lock() defer r.m.Unlock() @@ -103,6 +112,12 @@ func (r *Replication) Log(data []byte) (*Log, error) { l.ID = lastID + 1 l.CreateTime = uint32(time.Now().Unix()) + if r.cfg.Replication.Compression { + l.Compression = 1 + } else { + l.Compression = 0 + } + l.Data = data if err = r.s.StoreLog(l); err != nil { diff --git a/server/client.go b/server/client.go index 2ccea7a..ef9de76 100644 --- a/server/client.go +++ b/server/client.go @@ -60,8 +60,7 @@ type client struct { resp responseWriter - syncBuf bytes.Buffer - compressBuf []byte + syncBuf bytes.Buffer lastLogID uint64 @@ -83,7 +82,6 @@ func newClient(app *App) *client { c.ldb = app.ldb c.db, _ = app.ldb.Select(0) //use default db - c.compressBuf = []byte{} c.reqErr = make(chan error) return c diff --git a/server/cmd_replication.go b/server/cmd_replication.go index a261f42..ba091aa 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -3,8 +3,6 @@ package server import ( "fmt" "github.com/siddontang/go/hack" - "github.com/siddontang/go/snappy" - "github.com/siddontang/ledisdb/ledis" "io/ioutil" "os" @@ -94,14 +92,6 @@ func syncCommand(c *client) error { } else { buf := c.syncBuf.Bytes() - if len(c.compressBuf) < snappy.MaxEncodedLen(len(buf)) { - c.compressBuf = make([]byte, snappy.MaxEncodedLen(len(buf))) - } - - if buf, err = snappy.Encode(c.compressBuf, buf); err != nil { - return err - } - c.resp.writeBulk(buf) } diff --git a/server/replication.go b/server/replication.go index 3dd767e..d912016 100644 --- a/server/replication.go +++ b/server/replication.go @@ -7,8 +7,6 @@ import ( "fmt" "github.com/siddontang/go/hack" "github.com/siddontang/go/log" - - "github.com/siddontang/go/snappy" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/rpl" "net" @@ -38,8 +36,6 @@ type master struct { wg sync.WaitGroup syncBuf bytes.Buffer - - compressBuf []byte } func newMaster(app *App) *master { @@ -48,8 +44,6 @@ func newMaster(app *App) *master { m.quit = make(chan struct{}, 1) - m.compressBuf = make([]byte, 256) - return m } @@ -219,13 +213,7 @@ func (m *master) sync() error { } } - var buf []byte - buf, err = snappy.Decode(m.compressBuf, m.syncBuf.Bytes()) - if 
err != nil { - return err - } else if len(buf) > len(m.compressBuf) { - m.compressBuf = buf - } + buf := m.syncBuf.Bytes() if len(buf) == 0 { return nil From 7df7af8b54b9f10f7fd7ab9ebd541c6d535860de Mon Sep 17 00:00:00 2001 From: siddontang Date: Sat, 27 Sep 2014 20:11:36 +0800 Subject: [PATCH 25/42] client rollback tx when close --- server/client_resp.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/client_resp.go b/server/client_resp.go index 1b50bc3..078a02c 100644 --- a/server/client_resp.go +++ b/server/client_resp.go @@ -55,6 +55,11 @@ func (c *respClient) run() { c.conn.Close() } + if c.tx != nil { + c.tx.Rollback() + c.tx = nil + } + c.app.removeSlave(c.client) }() From b4d82d7e34533a0439e3fd2dd3db5508df46ed1a Mon Sep 17 00:00:00 2001 From: siddontang Date: Sat, 27 Sep 2014 20:13:13 +0800 Subject: [PATCH 26/42] update watigroup add place --- ledis/ledis.go | 3 ++- ledis/replication.go | 24 +++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/ledis/ledis.go b/ledis/ledis.go index 0052b3a..949452c 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -74,6 +74,7 @@ func Open2(cfg *config.Config, flags int) (*Ledis, error) { l.rc = make(chan struct{}, 8) l.rbatch = l.ldb.NewWriteBatch() + l.wg.Add(1) go l.onReplication() //first we must try wait all replication ok @@ -87,6 +88,7 @@ func Open2(cfg *config.Config, flags int) (*Ledis, error) { l.dbs[i] = l.newDB(i) } + l.wg.Add(1) go l.onDataExpired() return l, nil @@ -176,7 +178,6 @@ func (l *Ledis) SetReadOnly(b bool) { } func (l *Ledis) onDataExpired() { - l.wg.Add(1) defer l.wg.Done() var executors []*elimination = make([]*elimination, len(l.dbs)) diff --git a/ledis/replication.go b/ledis/replication.go index 1fa1531..7b4d30c 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -22,19 +22,18 @@ func (l *Ledis) ReplicationUsed() bool { return l.r != nil } -func (l *Ledis) handleReplication() { - l.commitLock.Lock() - defer l.commitLock.Unlock() - +func (l *Ledis) handleReplication() error { l.rwg.Add(1) rl := &rpl.Log{} + var err error for { - if err := l.r.NextNeedCommitLog(rl); err != nil { + if err = l.r.NextNeedCommitLog(rl); err != nil { if err != rpl.ErrNoBehindLog { log.Error("get next commit log err, %s", err.Error) + return err } else { l.rwg.Done() - return + return nil } } else { l.rbatch.Rollback() @@ -43,18 +42,22 @@ func (l *Ledis) handleReplication() { //todo optimize if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil { log.Error("decode log error %s", err.Error()) - return + return err } } decodeEventBatch(l.rbatch, rl.Data) - if err := l.rbatch.Commit(); err != nil { + l.commitLock.Lock() + if err = l.rbatch.Commit(); err != nil { log.Error("commit log error %s", err.Error()) - return } else if err = l.r.UpdateCommitID(rl.ID); err != nil { log.Error("update commit id error %s", err.Error()) - return + } + + l.commitLock.Unlock() + if err != nil { + return err } } @@ -62,7 +65,6 @@ func (l *Ledis) handleReplication() { } func (l *Ledis) onReplication() { - l.wg.Add(1) defer l.wg.Done() AsyncNotify(l.rc) From 69e489dd1b47595df559f244534ff1d1a03bbddf Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 28 Sep 2014 20:37:57 +0800 Subject: [PATCH 27/42] bugfix for replication --- ledis/batch.go | 4 +++- ledis/replication.go | 14 ++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/ledis/batch.go b/ledis/batch.go index c77e91f..61d5cd2 100644 --- a/ledis/batch.go +++ b/ledis/batch.go @@ -55,7 +55,7 @@ func (b *batch) Put(key []byte, 
value []byte) { func (b *batch) Delete(key []byte) { if b.l.r != nil { - b.Delete(key) + b.eb.Delete(key) } b.WriteBatch.Delete(key) @@ -121,11 +121,13 @@ func (l *Ledis) handleCommit(eb *eventBatch, c commiter) error { if err = c.Commit(); err != nil { log.Fatal("commit error %s", err.Error()) + l.noticeReplication() return err } if err = l.r.UpdateCommitID(rl.ID); err != nil { log.Fatal("update commit id error %s", err.Error()) + l.noticeReplication() return err } diff --git a/ledis/replication.go b/ledis/replication.go index 7b4d30c..6741589 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -23,6 +23,9 @@ func (l *Ledis) ReplicationUsed() bool { } func (l *Ledis) handleReplication() error { + l.wLock.Lock() + defer l.wLock.Unlock() + l.rwg.Add(1) rl := &rpl.Log{} var err error @@ -73,8 +76,6 @@ func (l *Ledis) onReplication() { select { case <-l.rc: l.handleReplication() - case <-time.After(5 * time.Second): - l.handleReplication() case <-l.quit: return } @@ -86,7 +87,8 @@ func (l *Ledis) WaitReplication() error { return ErrRplNotSupport } - AsyncNotify(l.rc) + + l.noticeReplication() l.rwg.Wait() @@ -125,11 +127,15 @@ func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { } - AsyncNotify(l.rc) + l.noticeReplication() return nil } +func (l *Ledis) noticeReplication() { + AsyncNotify(l.rc) +} + func (l *Ledis) StoreLogsFromData(data []byte) error { rb := bytes.NewReader(data) From 2a3fca829ebd72b7d513f87b75644eb63d851b21 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 28 Sep 2014 21:34:24 +0800 Subject: [PATCH 28/42] update slaveof command, wait (n + 1) /2 replies --- doc/commands.md | 6 +++--- server/app.go | 2 +- server/cmd_replication.go | 12 ++++++++++-- server/cmd_replication_test.go | 4 ++-- server/replication.go | 30 ++++++++++++++++++++++-------- 5 files changed, 38 insertions(+), 16 deletions(-) diff --git a/doc/commands.md b/doc/commands.md index 0809131..4f90a18 100644 --- a/doc/commands.md +++ b/doc/commands.md @@ -119,7 +119,7 @@ Table of Contents - [BPERSIST key](#bpersist-key) - [BXSCAN key [MATCH match] [COUNT count]](#bxscan-key-match-match-count-count) - [Replication](#replication) - - [SLAVEOF host port](#slaveof-host-port) + - [SLAVEOF host port [restart]](#slaveof-host-port-restart) - [FULLSYNC](#fullsync) - [SYNC logid](#sync-logid) - [Server](#server) @@ -2396,13 +2396,13 @@ See [XSCAN](#xscan-key-match-match-count-count) for more information. ## Replication -### SLAVEOF host port +### SLAVEOF host port [restart] Changes the replication settings of a slave on the fly. If the server is already acting as slave, SLAVEOF NO ONE will turn off the replication. SLAVEOF host port will make the server a slave of another server listening at the specified host and port. -If a server is already a slave of a master, SLAVEOF host port will stop the replication against the old and start the synchronization against the new one, discarding the old dataset. +If a server is already a slave of a master, SLAVEOF host port will stop the replication against the old and start the synchronization against the new one, if restart is set, it will discard the old dataset, otherwise it will sync with LastLogID + 1. 
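**Examples**

An illustrative session for the updated command (the address is a placeholder and the replies assume each step succeeds):

```
ledis> SLAVEOF 127.0.0.1 6380
OK
ledis> SLAVEOF 127.0.0.1 6380 restart
OK
ledis> SLAVEOF NO ONE
OK
```

The `restart` form forces a full sync and discards the slave's old dataset; without it the slave resumes syncing from LastLogID + 1.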
### FULLSYNC diff --git a/server/app.go b/server/app.go index 62f8514..dbf12e5 100644 --- a/server/app.go +++ b/server/app.go @@ -135,7 +135,7 @@ func (app *App) Close() { func (app *App) Run() { if len(app.cfg.SlaveOf) > 0 { - app.slaveof(app.cfg.SlaveOf) + app.slaveof(app.cfg.SlaveOf, false) } go app.httpServe() diff --git a/server/cmd_replication.go b/server/cmd_replication.go index ba091aa..aa6ede4 100644 --- a/server/cmd_replication.go +++ b/server/cmd_replication.go @@ -13,24 +13,32 @@ import ( func slaveofCommand(c *client) error { args := c.args - if len(args) != 2 { + if len(args) != 2 || len(args) != 3 { return ErrCmdParams } masterAddr := "" + restart := false if strings.ToLower(hack.String(args[0])) == "no" && strings.ToLower(hack.String(args[1])) == "one" { //stop replication, use master = "" + if len(args) != 2 { + return ErrCmdParams + } } else { if _, err := strconv.ParseInt(hack.String(args[1]), 10, 16); err != nil { return err } masterAddr = fmt.Sprintf("%s:%s", args[0], args[1]) + + if len(args) == 3 && strings.ToLower(hack.String(args[2])) == "restart" { + restart = true + } } - if err := c.app.slaveof(masterAddr); err != nil { + if err := c.app.slaveof(masterAddr, restart); err != nil { return err } diff --git a/server/cmd_replication_test.go b/server/cmd_replication_test.go index fee81fa..07db0c7 100644 --- a/server/cmd_replication_test.go +++ b/server/cmd_replication_test.go @@ -96,7 +96,7 @@ func TestReplication(t *testing.T) { t.Fatal(err) } - slave.slaveof("") + slave.slaveof("", false) db.Set([]byte("a2"), value) db.Set([]byte("b2"), value) @@ -112,7 +112,7 @@ func TestReplication(t *testing.T) { t.Fatal("must error") } - slave.slaveof(masterCfg.Addr) + slave.slaveof(masterCfg.Addr, false) time.Sleep(1 * time.Second) diff --git a/server/replication.go b/server/replication.go index d912016..2c409bf 100644 --- a/server/replication.go +++ b/server/replication.go @@ -84,7 +84,7 @@ func (m *master) stopReplication() error { return nil } -func (m *master) startReplication(masterAddr string) error { +func (m *master) startReplication(masterAddr string, restart bool) error { //stop last replcation, if avaliable m.Close() @@ -94,11 +94,11 @@ func (m *master) startReplication(masterAddr string) error { m.app.ldb.SetReadOnly(true) - go m.runReplication() + go m.runReplication(restart) return nil } -func (m *master) runReplication() { +func (m *master) runReplication(restart bool) { m.wg.Add(1) defer m.wg.Done() @@ -114,6 +114,16 @@ func (m *master) runReplication() { } } + if restart { + if err := m.fullSync(); err != nil { + if m.conn != nil { + //if conn == nil, other close the replication, not error + log.Error("restart fullsync error %s", err.Error()) + } + return + } + } + for { if err := m.sync(); err != nil { if m.conn != nil { @@ -227,7 +237,7 @@ func (m *master) sync() error { } -func (app *App) slaveof(masterAddr string) error { +func (app *App) slaveof(masterAddr string, restart bool) error { app.m.Lock() defer app.m.Unlock() @@ -242,7 +252,7 @@ func (app *App) slaveof(masterAddr string) error { app.ldb.SetReadOnly(false) } else { - return app.m.startReplication(masterAddr) + return app.m.startReplication(masterAddr, restart) } return nil @@ -308,15 +318,19 @@ func (app *App) publishNewLog(l *rpl.Log) { } done := make(chan struct{}, 1) - go func() { + go func(total int) { + n := 0 for i := 0; i < len(ss); i++ { id := <-ack.ch if id > logId { - break + n++ + if n >= total { + break + } } } done <- struct{}{} - }() + }((len(ss) + 1) / 2) select { case <-done: From 
5786b72eb1cfdb4fb29e54b3042c7c04dee6b05b Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 28 Sep 2014 21:55:25 +0800 Subject: [PATCH 29/42] bugfix for wait replication --- ledis/replication.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/ledis/replication.go b/ledis/replication.go index 6741589..d9f737a 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -89,18 +89,20 @@ func (l *Ledis) WaitReplication() error { } l.noticeReplication() - l.rwg.Wait() - b, err := l.r.CommitIDBehind() - if err != nil { - return err - } else if b { - AsyncNotify(l.rc) - l.rwg.Wait() + for i := 0; i < 100; i++ { + b, err := l.r.CommitIDBehind() + if err != nil { + return err + } else if b { + l.noticeReplication() + l.rwg.Wait() + } else { + return nil + } } - - return nil + return errors.New("wait replication too many times") } func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { From 8eb605295181c793484d9bf7fc889601c4c05e78 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 28 Sep 2014 21:55:36 +0800 Subject: [PATCH 30/42] add replication doc --- doc/Replication.md | 74 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 doc/Replication.md diff --git a/doc/Replication.md b/doc/Replication.md new file mode 100644 index 0000000..b88dc52 --- /dev/null +++ b/doc/Replication.md @@ -0,0 +1,74 @@ +At first, LedisDB uses BinLog (like MySQL BinLog) to support replication. Slave syncs logs from Master with specified BinLog filename and position. It is simple but not suitable for some cases. + +Let's assume below scenario: A -> B and A -> C, here A is master, B and C are slaves. -> means "replicates to". If master A failed, we must select B or C as the new master. Usually, we must choose the one which has most up to date from A, but it is not easy to check which one is it. + +MySQL has the same problem for this, so from MySQL 5.6, it introduces GTID (Global Transaction ID) to solve it. GTID is very powerful but a little complex, I just want to a simple and easy solution. + +Before GTID, Google has supplied a solution calling [Global Transaction IDs](https://code.google.com/p/google-mysql-tools/wiki/GlobalTransactionIds) which uses a monotonically increasing group id to represent an unique transaction event in BinLog. Although it has some limitations for MySQL hierarchical replication, I still think using a integer id like group id for log event is simple and suitable for LedisDB. + +Another implementation influencing me is [Raft](http://raftconsensus.github.io/), a consensus algorithm based on the replicated log. Leader must ensure that some followers receive the replicated log before executing the commands in log. The log has an unique log id (like group id above), if the leader failed, the candidate which has the up to date log (checked by log id) will be elected a new leader. + +Refer above, I supply a simple solution for LedisDB's replication. + +## Key word + ++ LogID: a monotonically increasing integer for a log ++ FirstLogID: the oldest log id for a server, all the logs before this id have been purged. ++ LastLogID: the newest log id for a server. ++ CommitID: the last log committed to execute. If LastLogID is 10 and CommitID is 5, server needs to commit logs from 6 - 10 to catch the up to date status. + +## Sync Flow + +For a master, every write changes will be handled below: + +1. Logs the changes to disk, it will calculate a new LogID based on LastLogID. +2. 
Sends this log to slaves and waits the ACK from slaves or timeout. +3. Commits to execute the changes. +4. Updates the CommitID to the LogID. + +For a slave: + +1. Connects to master and tells it which log to sync by LogID, it may have below cases: + + + The LogID is less than master's FirstLogID, master will tell slave log has been purged, the slave must do a full sync from master first. + + The master has this log and will send it to slave. + + The master has not this log (The slave has up to date with master). Slave will wait for some time or timeout then to start a new sync. + +2. After slave receiving a log (eg. LogID 10), it will save this log to disk and notice the replication thread to handle it. +3. Slave will start a new sync with LogID 11. + + +## Full Sync Flow + +If slave sync a log but master has purged it, slave has to start a full sync. + ++ Master generates a snapshot with current LastLogID and dumps to a file. ++ Slave discards all old data and replicated logs, then loads the dump file and updates CommitID with LastLogID in dump file. ++ Slave starts to sync with LogID = CommitID + 1. + +## ReadOnly + +Slave is always read only, which means that any write operations will be denied except `FlushAll` and replication. + +For a master, if it first writes log OK but commits or updates CommitID error, it will also turn into read only mode until replication thread executes this log correctly. + +## Strong Consensus Replication + +For the sync flow, we see that master will wait some slaves to return an ACK telling it has received the log, this mechanism implements strong consensus replication. If master failed, we can choose a slave which has up to date data with the master. + +You must notice that this feature has a big influence on the performance. Use your own risk! + +## Use + +Using replication is very simple for LedisDB, only using `slaveof` command. + ++ Uses `slaveof host port` to enable replication from master at "host:port". ++ Uses `slaveof no one` to stop replication and changes to master. + +If a slave first syncs from a master A, then uses slaveof to sync from master B, it will sync with the LogID = LastLogID + 1. If you want to start over from B, you must first use `slaveof host port restart`. + +## Limitation + ++ Replication can not store log less than current LastLogID. ++ Cycle replication not support. + From 99fcee2e58c7d3687cabe863d638f299d9e03e10 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 28 Sep 2014 22:00:29 +0800 Subject: [PATCH 31/42] update doc --- doc/Replication.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/Replication.md b/doc/Replication.md index b88dc52..4e7986a 100644 --- a/doc/Replication.md +++ b/doc/Replication.md @@ -4,7 +4,7 @@ Let's assume below scenario: A -> B and A -> C, here A is master, B and C are sl MySQL has the same problem for this, so from MySQL 5.6, it introduces GTID (Global Transaction ID) to solve it. GTID is very powerful but a little complex, I just want to a simple and easy solution. -Before GTID, Google has supplied a solution calling [Global Transaction IDs](https://code.google.com/p/google-mysql-tools/wiki/GlobalTransactionIds) which uses a monotonically increasing group id to represent an unique transaction event in BinLog. Although it has some limitations for MySQL hierarchical replication, I still think using a integer id like group id for log event is simple and suitable for LedisDB. 
+Before GTID, Google has supplied a solution called [Global Transaction IDs](https://code.google.com/p/google-mysql-tools/wiki/GlobalTransactionIds) which uses a monotonically increasing group id to represent an unique transaction event in BinLog. Although it has some limitations for MySQL hierarchical replication, I still think using a integer id like group id for log event is simple and suitable for LedisDB. Another implementation influencing me is [Raft](http://raftconsensus.github.io/), a consensus algorithm based on the replicated log. Leader must ensure that some followers receive the replicated log before executing the commands in log. The log has an unique log id (like group id above), if the leader failed, the candidate which has the up to date log (checked by log id) will be elected a new leader. @@ -40,7 +40,7 @@ For a slave: ## Full Sync Flow -If slave sync a log but master has purged it, slave has to start a full sync. +If slave syncs a log but master has purged it, slave has to start a full sync. + Master generates a snapshot with current LastLogID and dumps to a file. + Slave discards all old data and replicated logs, then loads the dump file and updates CommitID with LastLogID in dump file. @@ -71,4 +71,5 @@ If a slave first syncs from a master A, then uses slaveof to sync from master B, + Replication can not store log less than current LastLogID. + Cycle replication not support. ++ Master and slave must set `use_replication` to true to support replication. From 956a8b4421d0d7269bebdc880a5e6be0689d9b19 Mon Sep 17 00:00:00 2001 From: siddontang Date: Mon, 29 Sep 2014 14:11:14 +0800 Subject: [PATCH 32/42] update doc --- README.md | 4 ++-- doc/Replication.md | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 6779b94..05ba17d 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ LedisDB now supports multiple databases as backend to store data, you can test a + Supports lua scripting. + Supports expiration and ttl. + Supports using redis-cli directly. -+ Multiple client API supports, including Go, Python, Lua(Openresty), C/C++, Node.js. ++ Multiple client API support, including Go, Python, Lua(Openresty), C/C++, Node.js. + Easy to embed in your own Go application. + Restful API support, json/bson/msgpack output. + Replication to guarantee data safe. @@ -75,7 +75,7 @@ Create a workspace and checkout ledisdb source ## Choose store database -LedisDB now supports goleveldb, lmdb, leveldb, rocksdb, boltdb, hyperleveldb. it will choose goleveldb as default to store data if you not set. +LedisDB now supports goleveldb, lmdb, leveldb, rocksdb, boltdb, hyperleveldb, memory. it will choose goleveldb as default to store data if you don't set. Choosing a store database to use is very simple, you have two ways: diff --git a/doc/Replication.md b/doc/Replication.md index 4e7986a..6e6613b 100644 --- a/doc/Replication.md +++ b/doc/Replication.md @@ -10,7 +10,7 @@ Another implementation influencing me is [Raft](http://raftconsensus.github.io/) Refer above, I supply a simple solution for LedisDB's replication. -## Key word +## Keyword + LogID: a monotonically increasing integer for a log + FirstLogID: the oldest log id for a server, all the logs before this id have been purged. @@ -62,14 +62,14 @@ You must notice that this feature has a big influence on the performance. Use yo Using replication is very simple for LedisDB, only using `slaveof` command. -+ Uses `slaveof host port` to enable replication from master at "host:port". 
-+ Uses `slaveof no one` to stop replication and changes to master. ++ Use `slaveof host port` to enable replication from master at "host:port". ++ Use `slaveof no one` to stop replication and change the slave to master. -If a slave first syncs from a master A, then uses slaveof to sync from master B, it will sync with the LogID = LastLogID + 1. If you want to start over from B, you must first use `slaveof host port restart`. +If a slave first syncs from a master A, then uses `slaveof` to sync from master B, it will sync with the LogID = LastLogID + 1. If you want to start over from B, you must use `slaveof host port restart` which will start a full sync first. ## Limitation + Replication can not store log less than current LastLogID. -+ Cycle replication not support. ++ Cycle replication is not supported. + Master and slave must set `use_replication` to true to support replication. From de46e1a511ca87499613bbfcb3779ee5bb4be93d Mon Sep 17 00:00:00 2001 From: siddontang Date: Mon, 29 Sep 2014 14:19:42 +0800 Subject: [PATCH 33/42] update doc --- cmd/ledis-cli/const.go | 6 +++--- doc/commands.json | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/ledis-cli/const.go b/cmd/ledis-cli/const.go index 842866b..b89e757 100644 --- a/cmd/ledis-cli/const.go +++ b/cmd/ledis-cli/const.go @@ -1,4 +1,4 @@ -//This file was generated by .tools/generate_commands.py on Thu Sep 25 2014 09:51:10 +0800 +//This file was generated by .tools/generate_commands.py on Mon Sep 29 2014 14:19:33 +0800 package main var helpCommands = [][]string{ @@ -86,7 +86,7 @@ var helpCommands = [][]string{ {"SINTER", "key [key ...]", "Set"}, {"SINTERSTORE", "destination key [key ...]", "Set"}, {"SISMEMBER", "key member", "Set"}, - {"SLAVEOF", "host port", "Replication"}, + {"SLAVEOF", "host port [restart]", "Replication"}, {"SMCLEAR", "key [key ...]", "Set"}, {"SMEMBERS", "key", "Set"}, {"SPERSIST", "key", "Set"}, @@ -95,7 +95,7 @@ var helpCommands = [][]string{ {"SUNION", "key [key ...]", "Set"}, {"SUNIONSTORE", "destination key [key ...]", "Set"}, {"SXSCAN", "key [MATCH match] [COUNT count]", "Set"}, - {"SYNC", "index offset", "Replication"}, + {"SYNC", "logid", "Replication"}, {"TTL", "key", "KV"}, {"XSCAN", "key [MATCH match] [COUNT count]", "KV"}, {"ZADD", "key score member [score member ...]", "ZSet"}, diff --git a/doc/commands.json b/doc/commands.json index 828186d..6ed95bd 100644 --- a/doc/commands.json +++ b/doc/commands.json @@ -301,12 +301,12 @@ "readonly": false }, "SLAVEOF": { - "arguments": "host port", + "arguments": "host port [restart]", "group": "Replication", "readonly": false }, "SYNC": { - "arguments": "index offset", + "arguments": "logid", "group": "Replication", "readonly": false }, From 98912695a43a6f2216fed57291f4afedf1256eef Mon Sep 17 00:00:00 2001 From: siddontang Date: Mon, 29 Sep 2014 17:01:58 +0800 Subject: [PATCH 34/42] wait replication sleep some time --- ledis/ledis.go | 2 +- ledis/replication.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ledis/ledis.go b/ledis/ledis.go index 949452c..8893eee 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -71,7 +71,7 @@ func Open2(cfg *config.Config, flags int) (*Ledis, error) { return nil, err } - l.rc = make(chan struct{}, 8) + l.rc = make(chan struct{}, 1) l.rbatch = l.ldb.NewWriteBatch() l.wg.Add(1) diff --git a/ledis/replication.go b/ledis/replication.go index d9f737a..b68a990 100644 --- a/ledis/replication.go +++ b/ledis/replication.go @@ -70,7 +70,7 @@ func (l *Ledis) handleReplication() error { func (l 
*Ledis) onReplication() { defer l.wg.Done() - AsyncNotify(l.rc) + l.noticeReplication() for { select { @@ -98,10 +98,12 @@ func (l *Ledis) WaitReplication() error { } else if b { l.noticeReplication() l.rwg.Wait() + time.Sleep(100 * time.Millisecond) } else { return nil } } + return errors.New("wait replication too many times") } From bae86ca7dd0f41cf1f0b69e9c3f896acf8166e77 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 1 Oct 2014 16:47:35 +0800 Subject: [PATCH 35/42] support godep --- .gitignore | 1 + Godeps/Godeps.json | 54 +++++++++++++++++++++++++++++++++++++++++++ Godeps/Readme | 5 ++++ Makefile | 6 ++--- README.md | 3 +++ bootstrap.sh | 8 +++++++ tools/build_config.sh | 8 +++++++ 7 files changed, 82 insertions(+), 3 deletions(-) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme diff --git a/.gitignore b/.gitignore index 1955ca3..42e539f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ build nohup.out build_config.mk var +_workspace \ No newline at end of file diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 0000000..343f3a7 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,54 @@ +{ + "ImportPath": "github.com/siddontang/ledisdb", + "GoVersion": "go1.3.2", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/BurntSushi/toml", + "Rev": "2ceedfee35ad3848e49308ab0c9a4f640cfb5fb2" + }, + { + "ImportPath": "github.com/boltdb/bolt", + "Comment": "data/v1-228-g8fb50d5", + "Rev": "8fb50d5ee57110936b904a7539d4c5f2bf2359db" + }, + { + "ImportPath": "github.com/siddontang/go/bson", + "Rev": "466d5bc779ad45f5923d0f59efbc5d696bf2099c" + }, + { + "ImportPath": "github.com/siddontang/go/filelock", + "Rev": "466d5bc779ad45f5923d0f59efbc5d696bf2099c" + }, + { + "ImportPath": "github.com/siddontang/go/hack", + "Rev": "466d5bc779ad45f5923d0f59efbc5d696bf2099c" + }, + { + "ImportPath": "github.com/siddontang/go/log", + "Rev": "466d5bc779ad45f5923d0f59efbc5d696bf2099c" + }, + { + "ImportPath": "github.com/siddontang/go/num", + "Rev": "466d5bc779ad45f5923d0f59efbc5d696bf2099c" + }, + { + "ImportPath": "github.com/siddontang/go/snappy", + "Rev": "466d5bc779ad45f5923d0f59efbc5d696bf2099c" + }, + { + "ImportPath": "github.com/siddontang/goleveldb/leveldb", + "Rev": "c1f6d721561c48f467b26a277741e55fd224df1e" + }, + { + "ImportPath": "github.com/szferi/gomdb", + "Rev": "d8a6d8371e2409b0787a782bf9b0c5daca364a3d" + }, + { + "ImportPath": "github.com/ugorji/go/codec", + "Rev": "71c2886f5a673a35f909803f38ece5810165097b" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 0000000..4cdaa53 --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Makefile b/Makefile index f5b6dcd..6764157 100644 --- a/Makefile +++ b/Makefile @@ -16,13 +16,13 @@ export GO_BUILD_TAGS all: build build: - go install -tags '$(GO_BUILD_TAGS)' ./... + $(GO) install -tags '$(GO_BUILD_TAGS)' ./... clean: - go clean -i ./... + $(GO) clean -i ./... test: - go test -tags '$(GO_BUILD_TAGS)' ./... + $(GO) test -tags '$(GO_BUILD_TAGS)' ./... 
pytest: sh client/ledis-py/tests/all.sh diff --git a/README.md b/README.md index 05ba17d..3d1cf57 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,9 @@ Create a workspace and checkout ledisdb source make make test +## Godep support + +LedisDB supports building with [godep](https://github.com/tools/godep) which can manage LedisDB go dependence automatically. ## LevelDB support diff --git a/bootstrap.sh b/bootstrap.sh index e6d4071..ca844b2 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -2,6 +2,14 @@ . ./dev.sh +# Test godep install +godep path > /dev/null 2>&1 +if [ "$?" = 0 ]; then + GOPATH=`godep path` + godep restore + exit 0 +fi + go get github.com/siddontang/goleveldb/leveldb go get github.com/szferi/gomdb diff --git a/tools/build_config.sh b/tools/build_config.sh index 22b7bf2..c97e13d 100755 --- a/tools/build_config.sh +++ b/tools/build_config.sh @@ -13,6 +13,14 @@ touch $OUTPUT source ./dev.sh +# Test godep install +godep path > /dev/null 2>&1 +if [ "$?" = 0 ]; then + echo "GO=godep go" >> $OUTPUT +else + echo "GO=go" >> $OUTPUT +fi + echo "CGO_CFLAGS=$CGO_CFLAGS" >> $OUTPUT echo "CGO_CXXFLAGS=$CGO_CXXFLAGS" >> $OUTPUT echo "CGO_LDFLAGS=$CGO_LDFLAGS" >> $OUTPUT From d675556cb44be8e3e382f3ce6b5614f58d273c06 Mon Sep 17 00:00:00 2001 From: siddontang Date: Wed, 1 Oct 2014 23:26:46 +0800 Subject: [PATCH 36/42] support zset lex commands --- ledis/t_zset.go | 80 +++++++++++++++++++++++++ ledis/t_zset_test.go | 58 ++++++++++++++++++ server/cmd_zset.go | 127 ++++++++++++++++++++++++++++++++++++++++ server/cmd_zset_test.go | 49 ++++++++++++++++ 4 files changed, 314 insertions(+) diff --git a/ledis/t_zset.go b/ledis/t_zset.go index 3a2336f..dc028c0 100644 --- a/ledis/t_zset.go +++ b/ledis/t_zset.go @@ -939,3 +939,83 @@ func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, agg func (db *DB) ZScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { return db.scan(ZSizeType, key, count, inclusive, match) } + +func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) { + if min == nil { + min = db.zEncodeStartSetKey(key) + } else { + min = db.zEncodeSetKey(key, min) + } + if max == nil { + max = db.zEncodeStopSetKey(key) + } else { + max = db.zEncodeSetKey(key, max) + } + + it := db.bucket.RangeLimitIterator(min, max, rangeType, offset, count) + defer it.Close() + + ay := make([][]byte, 0, 16) + for ; it.Valid(); it.Next() { + if _, m, err := db.zDecodeSetKey(it.Key()); err == nil { + ay = append(ay, m) + } + } + + return ay, nil +} + +func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { + if min == nil { + min = db.zEncodeStartSetKey(key) + } else { + min = db.zEncodeSetKey(key, min) + } + if max == nil { + max = db.zEncodeStopSetKey(key) + } else { + max = db.zEncodeSetKey(key, max) + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + it := db.bucket.RangeIterator(min, max, rangeType) + defer it.Close() + + var n int64 = 0 + for ; it.Valid(); it.Next() { + t.Delete(it.RawKey()) + n++ + } + + if err := t.Commit(); err != nil { + return 0, err + } + + return n, nil +} + +func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { + if min == nil { + min = db.zEncodeStartSetKey(key) + } else { + min = db.zEncodeSetKey(key, min) + } + if max == nil { + max = db.zEncodeStopSetKey(key) + } else { + max = db.zEncodeSetKey(key, max) + } + + it := db.bucket.RangeIterator(min, max, rangeType) + defer it.Close() 
+ + var n int64 = 0 + for ; it.Valid(); it.Next() { + n++ + } + + return n, nil +} diff --git a/ledis/t_zset_test.go b/ledis/t_zset_test.go index a1754ed..98f0a81 100644 --- a/ledis/t_zset_test.go +++ b/ledis/t_zset_test.go @@ -2,6 +2,8 @@ package ledis import ( "fmt" + "github.com/siddontang/ledisdb/store" + "reflect" "testing" ) @@ -407,3 +409,59 @@ func TestZScan(t *testing.T) { t.Fatal("invalid value length ", len(v)) } } + +func TestZLex(t *testing.T) { + db := getTestDB() + if _, err := db.zFlush(); err != nil { + t.Fatal(err) + } + + key := []byte("myzset") + if _, err := db.ZAdd(key, ScorePair{0, []byte("a")}, + ScorePair{0, []byte("b")}, + ScorePair{0, []byte("c")}, + ScorePair{0, []byte("d")}, + ScorePair{0, []byte("e")}, + ScorePair{0, []byte("f")}, + ScorePair{0, []byte("g")}); err != nil { + t.Fatal(err) + } + + if ay, err := db.ZRangeByLex(key, nil, []byte("c"), store.RangeClose, 0, -1); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, [][]byte{[]byte("a"), []byte("b"), []byte("c")}) { + t.Fatal("must equal a, b, c") + } + + if ay, err := db.ZRangeByLex(key, nil, []byte("c"), store.RangeROpen, 0, -1); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, [][]byte{[]byte("a"), []byte("b")}) { + t.Fatal("must equal a, b") + } + + if ay, err := db.ZRangeByLex(key, []byte("aaa"), []byte("g"), store.RangeROpen, 0, -1); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, [][]byte{[]byte("b"), + []byte("c"), []byte("d"), []byte("e"), []byte("f")}) { + t.Fatal("must equal b, c, d, e, f", fmt.Sprintf("%q", ay)) + } + + if n, err := db.ZLexCount(key, nil, nil, store.RangeClose); err != nil { + t.Fatal(err) + } else if n != 7 { + t.Fatal(n) + } + + if n, err := db.ZRemRangeByLex(key, []byte("aaa"), []byte("g"), store.RangeROpen); err != nil { + t.Fatal(err) + } else if n != 5 { + t.Fatal(n) + } + + if n, err := db.ZLexCount(key, nil, nil, store.RangeClose); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + +} diff --git a/server/cmd_zset.go b/server/cmd_zset.go index 4763683..3c5abcb 100644 --- a/server/cmd_zset.go +++ b/server/cmd_zset.go @@ -5,6 +5,7 @@ import ( "github.com/siddontang/go/hack" "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/ledis" + "github.com/siddontang/ledisdb/store" "math" "strconv" "strings" @@ -661,6 +662,128 @@ func zxscanCommand(c *client) error { return nil } +func zparseMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, rangeType uint8, err error) { + rangeType = store.RangeClose + if strings.ToLower(hack.String(minBuf)) == "-" { + min = nil + } else { + if len(minBuf) == 0 { + err = ErrCmdParams + return + } + + if minBuf[0] == '(' { + rangeType |= store.RangeLOpen + min = minBuf[1:] + } else if minBuf[0] == '[' { + min = minBuf[1:] + } else { + err = ErrCmdParams + return + } + } + + if strings.ToLower(hack.String(maxBuf)) == "+" { + max = nil + } else { + if len(maxBuf) == 0 { + err = ErrCmdParams + return + } + if maxBuf[0] == '(' { + rangeType |= store.RangeROpen + max = maxBuf[1:] + } else if maxBuf[0] == '[' { + max = maxBuf[1:] + } else { + err = ErrCmdParams + return + } + } + + return +} + +func zrangebylexCommand(c *client) error { + args := c.args + if len(args) != 3 && len(args) != 6 { + return ErrCmdParams + } + + min, max, rangeType, err := zparseMemberRange(args[1], args[2]) + if err != nil { + return err + } + + var offset int = 0 + var count int = -1 + + if len(args) == 6 { + if strings.ToLower(hack.String(args[3])) != "limit" { + return ErrSyntax + } + + if 
offset, err = strconv.Atoi(hack.String(args[4])); err != nil { + return ErrValue + } + + if count, err = strconv.Atoi(hack.String(args[5])); err != nil { + return ErrValue + } + } + + key := args[0] + if ay, err := c.db.ZRangeByLex(key, min, max, rangeType, offset, count); err != nil { + return err + } else { + c.resp.writeSliceArray(ay) + } + + return nil +} + +func zremrangebylexCommand(c *client) error { + args := c.args + if len(args) != 3 { + return ErrCmdParams + } + + min, max, rangeType, err := zparseMemberRange(args[1], args[2]) + if err != nil { + return err + } + + key := args[0] + if n, err := c.db.ZRemRangeByLex(key, min, max, rangeType); err != nil { + return err + } else { + c.resp.writeInteger(n) + } + + return nil +} + +func zlexcountCommand(c *client) error { + args := c.args + if len(args) != 3 { + return ErrCmdParams + } + + min, max, rangeType, err := zparseMemberRange(args[1], args[2]) + if err != nil { + return err + } + + key := args[0] + if n, err := c.db.ZLexCount(key, min, max, rangeType); err != nil { + return err + } else { + c.resp.writeInteger(n) + } + + return nil +} + func init() { register("zadd", zaddCommand) register("zcard", zcardCommand) @@ -680,6 +803,10 @@ func init() { register("zunionstore", zunionstoreCommand) register("zinterstore", zinterstoreCommand) + register("zrangebylex", zrangebylexCommand) + register("zremrangebylex", zremrangebylexCommand) + register("zlexcount", zlexcountCommand) + //ledisdb special command register("zclear", zclearCommand) diff --git a/server/cmd_zset_test.go b/server/cmd_zset_test.go index 8c74bdc..59411c5 100644 --- a/server/cmd_zset_test.go +++ b/server/cmd_zset_test.go @@ -3,6 +3,7 @@ package server import ( "fmt" "github.com/siddontang/ledisdb/client/go/ledis" + "reflect" "strconv" "testing" ) @@ -737,3 +738,51 @@ func TestZInterStore(t *testing.T) { } } } + +func TestZSetLex(t *testing.T) { + c := getTestConn() + defer c.Close() + + key := []byte("myzlexset") + if _, err := c.Do("zadd", key, + 0, "a", 0, "b", 0, "c", 0, "d", 0, "e", 0, "f", 0, "g"); err != nil { + t.Fatal(err) + } + + if ay, err := ledis.Strings(c.Do("zrangebylex", key, "-", "[c")); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, []string{"a", "b", "c"}) { + t.Fatal("must equal") + } + + if ay, err := ledis.Strings(c.Do("zrangebylex", key, "-", "(c")); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, []string{"a", "b"}) { + t.Fatal("must equal") + } + + if ay, err := ledis.Strings(c.Do("zrangebylex", key, "[aaa", "(g")); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, []string{"b", "c", "d", "e", "f"}) { + t.Fatal("must equal") + } + + if n, err := ledis.Int64(c.Do("zlexcount", key, "-", "(c")); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + + if n, err := ledis.Int64(c.Do("zremrangebylex", key, "[aaa", "(g")); err != nil { + t.Fatal(err) + } else if n != 5 { + t.Fatal(n) + } + + if n, err := ledis.Int64(c.Do("zlexcount", key, "-", "+")); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + +} From 1820190f96ade99135b7c9e19c7487a1c5c09f66 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 2 Oct 2014 15:40:53 +0800 Subject: [PATCH 37/42] update doc --- doc/commands.json | 18 ++++++++++++ doc/commands.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/doc/commands.json b/doc/commands.json index 6ed95bd..b813d3b 100644 --- a/doc/commands.json +++ b/doc/commands.json @@ -512,6 +512,24 @@ "readonly": 
false
 	},
+
+	"ZRANGEBYLEX":{
+		"arguments": "key min max [LIMIT offset count]",
+		"group": "ZSet",
+		"readonly": true
+	},
+
+	"ZREMRANGBYLEX":{
+		"arguments": "key min max",
+		"group": "ZSet",
+		"readonly": false
+	},
+
+	"ZLEXCOUNT":{
+		"arguments": "key min max",
+		"group": "ZSet",
+		"readonly": true
+	},
+
 	"BEGIN": {
 		"arguments": "-",
 		"group": "Transaction",
diff --git a/doc/commands.md b/doc/commands.md
index 4f90a18..2cae8fd 100644
--- a/doc/commands.md
+++ b/doc/commands.md
@@ -106,6 +106,9 @@ Table of Contents
 	- [ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] ](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax)
 	- [ZXSCAN key [MATCH match] [COUNT count]](#zxscan-key-match-match-count-count)
+	- [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count)
+	- [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max)
+	- [ZLEXCOUNT key min max](#zlexcount-key-min-max)
 - [Bitmap](#bitmap)
 	- [BGET key](#bget-key)
 	- [BGETBIT key offset](#bgetbit-key-offset)
@@ -2227,10 +2230,75 @@ Iterate ZSet keys incrementally.
 
 See [XSCAN](#xscan-key-match-match-count-count) for more information.
 
+### ZRANGEBYLEX key min max [LIMIT offset count]
+
+When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max.
+
+If the elements in the sorted set have different scores, the returned elements are unspecified.
+
+Valid min and max must start with ( or [, in order to specify whether the range endpoint is respectively exclusive or inclusive. The special values of + or - for min and max have the special meaning of positively infinite and negatively infinite strings, so for instance the command ZRANGEBYLEX myzset - + is guaranteed to return all the elements in the sorted set, if all the elements have the same score.
+
+**Return value**
+
+array: list of elements in the specified lexicographical range
+
+**Example**
+
+```
+ledis> ZADD myzset 0 a 0 b 0 c 0 d 0 e 0 f 0 g
+(integer) 7
+ledis> ZRANGEBYLEX myzset - [c
+1) "a"
+2) "b"
+3) "c"
+ledis> ZRANGEBYLEX myzset - (c
+1) "a"
+2) "b"
+ledis> ZRANGEBYLEX myzset [aaa (g
+1) "b"
+2) "c"
+3) "d"
+4) "e"
+5) "f"
+```
+
+### ZREMRANGEBYLEX key min max
+
+Removes all elements in the sorted set stored at key within the lexicographical range specified by min and max.
+
+**Return value**
+
+int64: the number of elements removed.
+
+**Example**
+
+```
+ledis> ZADD myzset 0 a 0 b 0 c 0 d 0 e 0 f 0 g
+(integer) 7
+ledis> ZREMRANGEBYLEX myzset - [c
+(integer) 3
+```
+
+### ZLEXCOUNT key min max
+
+Returns the number of elements in the sorted set at key with a value between min and max.
+
+**Return value**
+
+int64: the number of elements in the specified lexicographical range.
+
+**Example**
+
+```
+ledis> ZADD myzset 0 a 0 b 0 c 0 d 0 e 0 f 0 g
+(integer) 7
+ledis> ZLEXCOUNT myzset - [c
+(integer) 3
+```
+
 ## Bitmap
 
-
 ### BGET key
 
 Returns the whole binary data stored at `key`.
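For reference, the three commands documented above map directly onto the new `DB.ZRangeByLex`, `DB.ZRemRangeByLex` and `DB.ZLexCount` methods introduced in the previous patch. The following minimal Go sketch shows how that API could be exercised through the embedded library; it is illustrative only, and the `ledis.Open` / `Select` / `Close` calls plus the `/tmp` data directory are assumptions taken from the surrounding tests, not something these patches define.

```go
package main

import (
	"fmt"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/ledis"
	"github.com/siddontang/ledisdb/store"
)

func main() {
	// Assumed setup: open an embedded LedisDB instance with the default
	// config; the data directory is an arbitrary example path.
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/ledis_zlex_demo"

	l, err := ledis.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	db, err := l.Select(0)
	if err != nil {
		panic(err)
	}

	key := []byte("myzset")

	// All members get score 0, so lexicographical ordering applies.
	db.ZAdd(key,
		ledis.ScorePair{0, []byte("a")},
		ledis.ScorePair{0, []byte("b")},
		ledis.ScorePair{0, []byte("c")},
		ledis.ScorePair{0, []byte("d")})

	// Equivalent of "ZRANGEBYLEX myzset - [c": a nil bound stands for the
	// infinite "-"/"+" ends, store.RangeClose keeps the given bound inclusive.
	if ay, err := db.ZRangeByLex(key, nil, []byte("c"), store.RangeClose, 0, -1); err == nil {
		for _, m := range ay {
			fmt.Println(string(m)) // a, b, c
		}
	}

	// Equivalent of "ZLEXCOUNT myzset - +".
	if n, err := db.ZLexCount(key, nil, nil, store.RangeClose); err == nil {
		fmt.Println(n) // 4
	}
}
```

On the wire, `zparseMemberRange` in server/cmd_zset.go performs the same mapping: `-` and `+` become nil bounds, `[` keeps `store.RangeClose`, and `(` switches on `store.RangeLOpen` or `store.RangeROpen` for the corresponding end.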
From a75791f72cb7156cdea11ff5786f8e6f69ce6053 Mon Sep 17 00:00:00 2001 From: siddontang Date: Thu, 2 Oct 2014 15:41:06 +0800 Subject: [PATCH 38/42] add client command --- client/ledis-py/ledis/client.py | 64 +++++++++++++++++++++++++++++ client/nodejs/ledis/lib/commands.js | 3 ++ client/openresty/ledis.lua | 6 +++ cmd/ledis-cli/const.go | 5 ++- 4 files changed, 77 insertions(+), 1 deletion(-) diff --git a/client/ledis-py/ledis/client.py b/client/ledis-py/ledis/client.py index 17cc1c4..2504e5d 100644 --- a/client/ledis-py/ledis/client.py +++ b/client/ledis-py/ledis/client.py @@ -733,6 +733,70 @@ class Ledis(object): "Return the score of element ``value`` in sorted set ``name``" return self.execute_command('ZSCORE', name, value) + def zinterstore(self, dest, keys, aggregate=None): + """ + Intersect multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) + + def zunionstore(self, dest, keys, aggregate=None): + """ + Union multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) + + def _zaggregate(self, command, dest, keys, aggregate=None): + pieces = [command, dest, len(keys)] + if isinstance(keys, dict): + keys, weights = iterkeys(keys), itervalues(keys) + else: + weights = None + pieces.extend(keys) + if weights: + pieces.append(Token('WEIGHTS')) + pieces.extend(weights) + if aggregate: + pieces.append(Token('AGGREGATE')) + pieces.append(aggregate) + return self.execute_command(*pieces) + + def zrangebylex(self, name, min, max, start=None, num=None): + """ + Return the lexicographical range of values from sorted set ``name`` + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice of the + range. + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYLEX', name, min, max] + if start is not None and num is not None: + pieces.extend([Token('LIMIT'), start, num]) + return self.execute_command(*pieces) + + def zremrangebylex(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` between the + lexicographical range specified by ``min`` and ``max``. + + Returns the number of elements removed. + """ + return self.execute_command('ZREMRANGEBYLEX', name, min, max) + + def zlexcount(self, name, min, max): + """ + Return the number of items in the sorted set ``name`` between the + lexicographical range ``min`` and ``max``. 
+ """ + return self.execute_command('ZLEXCOUNT', name, min, max) + + # SPECIAL COMMANDS SUPPORTED BY LEDISDB def zclear(self, name): "Delete key of ``name`` from sorted set" diff --git a/client/nodejs/ledis/lib/commands.js b/client/nodejs/ledis/lib/commands.js index f116444..8a24f6c 100644 --- a/client/nodejs/ledis/lib/commands.js +++ b/client/nodejs/ledis/lib/commands.js @@ -93,6 +93,9 @@ module.exports = [ "zscore", "zunionstore", "zinterstore", + "zrangebylex", + "zremrangebylex", + "zlexcount", "zclear", diff --git a/client/openresty/ledis.lua b/client/openresty/ledis.lua index 07c3f2b..7834c2b 100644 --- a/client/openresty/ledis.lua +++ b/client/openresty/ledis.lua @@ -95,6 +95,12 @@ local commands = { "zrevrank", "zrevrangebyscore", "zscore", + "zunionstore", + "zinterstore", + "zrangebylex", + "zremrangebylex", + "zlexcount", + --[[ledisdb special commands]] "zclear", "zmclear", diff --git a/cmd/ledis-cli/const.go b/cmd/ledis-cli/const.go index b89e757..3bca898 100644 --- a/cmd/ledis-cli/const.go +++ b/cmd/ledis-cli/const.go @@ -1,4 +1,4 @@ -//This file was generated by .tools/generate_commands.py on Mon Sep 29 2014 14:19:33 +0800 +//This file was generated by .tools/generate_commands.py on Thu Oct 02 2014 15:24:07 +0800 package main var helpCommands = [][]string{ @@ -106,12 +106,15 @@ var helpCommands = [][]string{ {"ZEXPIREAT", "key timestamp", "ZSet"}, {"ZINCRBY", "key increment member", "ZSet"}, {"ZINTERSTORE", "destkey numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]", "ZSet"}, + {"ZLEXCOUNT", "key min max", "ZSet"}, {"ZMCLEAR", "key [key ...]", "ZSet"}, {"ZPERSIST", "key", "ZSet"}, {"ZRANGE", "key start stop [WITHSCORES]", "ZSet"}, + {"ZRANGEBYLEX", "key min max [LIMIT offset count]", "ZSet"}, {"ZRANGEBYSCORE", "key min max [WITHSCORES] [LIMIT offset count]", "ZSet"}, {"ZRANK", "key member", "ZSet"}, {"ZREM", "key member [member ...]", "ZSet"}, + {"ZREMRANGBYLEX", "key min max", "ZSet"}, {"ZREMRANGEBYRANK", "key start stop", "ZSet"}, {"ZREMRANGEBYSCORE", "key min max", "ZSet"}, {"ZREVRANGE", "key start stop [WITHSCORES]", "ZSet"}, From 3f93676d4d4208f0072ce582f93397402b2dde36 Mon Sep 17 00:00:00 2001 From: siddontang Date: Fri, 3 Oct 2014 20:31:24 +0800 Subject: [PATCH 39/42] flushall will restart replication if possible --- server/cmd_replication_test.go | 10 ++++++++++ server/cmd_server.go | 3 +++ server/replication.go | 19 ++++++++++++++++++- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/server/cmd_replication_test.go b/server/cmd_replication_test.go index 07db0c7..76bf2c2 100644 --- a/server/cmd_replication_test.go +++ b/server/cmd_replication_test.go @@ -120,4 +120,14 @@ func TestReplication(t *testing.T) { t.Fatal(err) } + slave.tryReSlaveof() + + time.Sleep(1 * time.Second) + + slave.ldb.WaitReplication() + + if err = checkDataEqual(master, slave); err != nil { + t.Fatal(err) + } + } diff --git a/server/cmd_server.go b/server/cmd_server.go index ab8051f..198003c 100644 --- a/server/cmd_server.go +++ b/server/cmd_server.go @@ -68,6 +68,9 @@ func flushallCommand(c *client) error { return err } + //we will restart the replication from master if possible + c.app.tryReSlaveof() + c.resp.writeStatus(OK) return nil } diff --git a/server/replication.go b/server/replication.go index 2c409bf..dcd1587 100644 --- a/server/replication.go +++ b/server/replication.go @@ -94,12 +94,12 @@ func (m *master) startReplication(masterAddr string, restart bool) error { m.app.ldb.SetReadOnly(true) + m.wg.Add(1) go m.runReplication(restart) return nil } 
func (m *master) runReplication(restart bool) { - m.wg.Add(1) defer m.wg.Done() for { @@ -245,6 +245,8 @@ func (app *App) slaveof(masterAddr string, restart bool) error { return fmt.Errorf("slaveof must enable replication") } + app.cfg.SlaveOf = masterAddr + if len(masterAddr) == 0 { if err := app.m.stopReplication(); err != nil { return err @@ -258,6 +260,21 @@ func (app *App) slaveof(masterAddr string, restart bool) error { return nil } +func (app *App) tryReSlaveof() error { + app.m.Lock() + defer app.m.Unlock() + + if !app.ldb.ReplicationUsed() { + return nil + } + + if len(app.cfg.SlaveOf) == 0 { + return nil + } else { + return app.m.startReplication(app.cfg.SlaveOf, true) + } +} + func (app *App) addSlave(c *client) { app.slock.Lock() defer app.slock.Unlock() From 498a8c6f8155f3d9f5e00fb65cdb549e4699ee14 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 5 Oct 2014 08:56:35 +0800 Subject: [PATCH 40/42] update read me --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3d1cf57..2f12a55 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,8 @@ LedisDB supports building with [godep](https://github.com/tools/godep) which can + Set `ROCKSDB_DIR` and `SNAPPY_DIR` to the actual install path in `dev.sh`. + `make clean && make` -**Because RocksDB API may change sometimes, LedisDB may not build successfully. Now LedisDB supports RocksDB version 3.5 or newest master branch. ** + +Because RocksDB API may change sometimes, LedisDB may not build successfully. Now LedisDB supports RocksDB version 3.5 or newest master branch. ## HyperLevelDB support @@ -78,7 +79,7 @@ LedisDB supports building with [godep](https://github.com/tools/godep) which can ## Choose store database -LedisDB now supports goleveldb, lmdb, leveldb, rocksdb, boltdb, hyperleveldb, memory. it will choose goleveldb as default to store data if you don't set. +LedisDB now supports goleveldb, lmdb, leveldb, rocksdb, boltdb, hyperleveldb, memory. it will use goleveldb as default to store data if you don't set. 
Choosing a store database to use is very simple, you have two ways: From 4d974a0db75022214261bf0b9f3e5dc1da319350 Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 5 Oct 2014 17:24:44 +0800 Subject: [PATCH 41/42] add wait max slave acks config --- config/config.go | 12 +++++++----- config/config.toml | 5 +++++ config/config_test.go | 2 +- etc/ledis.conf | 29 ++++++++++++++++++++++++----- server/replication.go | 9 ++++++++- 5 files changed, 45 insertions(+), 12 deletions(-) diff --git a/config/config.go b/config/config.go index 5a8070b..668b545 100644 --- a/config/config.go +++ b/config/config.go @@ -30,11 +30,12 @@ type LMDBConfig struct { } type ReplicationConfig struct { - Path string `toml:"path"` - ExpiredLogDays int `toml:"expired_log_days"` - Sync bool `toml:"sync"` - WaitSyncTime int `toml:"wait_sync_time"` - Compression bool `toml:"compression"` + Path string `toml:"path"` + ExpiredLogDays int `toml:"expired_log_days"` + Sync bool `toml:"sync"` + WaitSyncTime int `toml:"wait_sync_time"` + WaitMaxSlaveAcks int `toml:"wait_max_slave_acks"` + Compression bool `toml:"compression"` } type Config struct { @@ -100,6 +101,7 @@ func NewConfigDefault() *Config { cfg.Replication.WaitSyncTime = 1 cfg.Replication.Compression = true + cfg.Replication.WaitMaxSlaveAcks = 2 return cfg } diff --git a/config/config.toml b/config/config.toml index fbe86a6..b8d80ec 100644 --- a/config/config.toml +++ b/config/config.toml @@ -60,5 +60,10 @@ sync = true # If sync is true, wait at last wait_sync_time seconds for slave syncing this log wait_sync_time = 1 +# If sync is true, wait at most min(wait_max_slave_acks, (n + 1) / 2) to promise syncing ok. +# n is slave number +# If 0, wait (n + 1) / 2 acks. +wait_max_slave_acks = 2 + # Compress the log or not compression = true diff --git a/config/config_test.go b/config/config_test.go index c2b5a16..47779aa 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -5,7 +5,7 @@ import ( ) func TestConfig(t *testing.T) { - _, err := NewConfigWithFile("./config.toml") + _, err := NewConfigWithFile("./ledis.toml") if err != nil { t.Fatal(err) } diff --git a/etc/ledis.conf b/etc/ledis.conf index 0d46aee..b8d80ec 100644 --- a/etc/ledis.conf +++ b/etc/ledis.conf @@ -1,7 +1,5 @@ # LedisDB configuration -# Config format is toml, https://github.com/toml-lang/toml - # Server listen address addr = "127.0.0.1:6380" @@ -15,6 +13,7 @@ data_dir = "/tmp/ledis_server" access_log = "" # Set slaveof to enable replication from master, empty, no replication +# Any write operations except flushall and replication will be disabled in slave mode. slaveof = "" # Choose which backend storage to use, now support: @@ -29,9 +28,12 @@ slaveof = "" # db_name = "leveldb" -# if not set, use data_dir/"db_name"_data +# If not set, use data_dir/"db_name"_data db_path = "" +# enable replication or not +use_replication = true + [leveldb] compression = false block_size = 32768 @@ -43,8 +45,25 @@ max_open_files = 1024 map_size = 524288000 nosync = true -[wal] -# if not set, use data_dir/wal +[replication] +# Path to store replication information(write ahead log, commit log, etc.) +# if not set, use data_dir/rpl path = "" +# Expire write ahead logs after the given days +expired_log_days = 7 +# If sync is true, the new log must be sent to some slaves, and then commit. +# It will reduce performance but have better high availability. 
+sync = true + +# If sync is true, wait at last wait_sync_time seconds for slave syncing this log +wait_sync_time = 1 + +# If sync is true, wait at most min(wait_max_slave_acks, (n + 1) / 2) to promise syncing ok. +# n is slave number +# If 0, wait (n + 1) / 2 acks. +wait_max_slave_acks = 2 + +# Compress the log or not +compression = true diff --git a/server/replication.go b/server/replication.go index dcd1587..b8b1868 100644 --- a/server/replication.go +++ b/server/replication.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/siddontang/go/hack" "github.com/siddontang/go/log" + "github.com/siddontang/go/num" "github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/rpl" "net" @@ -334,6 +335,11 @@ func (app *App) publishNewLog(l *rpl.Log) { s.ack = ack } + total := (len(ss) + 1) / 2 + if app.cfg.Replication.WaitMaxSlaveAcks > 0 { + total = num.MinInt(total, app.cfg.Replication.WaitMaxSlaveAcks) + } + done := make(chan struct{}, 1) go func(total int) { n := 0 @@ -347,10 +353,11 @@ func (app *App) publishNewLog(l *rpl.Log) { } } done <- struct{}{} - }((len(ss) + 1) / 2) + }(total) select { case <-done: case <-time.After(time.Duration(app.cfg.Replication.WaitSyncTime) * time.Second): + log.Info("replication wait timeout") } } From 87f19c85916a12599c95bf31e30126af78efc52c Mon Sep 17 00:00:00 2001 From: siddontang Date: Sun, 5 Oct 2014 21:36:12 +0800 Subject: [PATCH 42/42] update doc --- doc/Replication.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/Replication.md b/doc/Replication.md index 6e6613b..9e5b6f0 100644 --- a/doc/Replication.md +++ b/doc/Replication.md @@ -69,7 +69,8 @@ If a slave first syncs from a master A, then uses `slaveof` to sync from master ## Limitation ++ Multi-Master is not supported. + Replication can not store log less than current LastLogID. -+ Cycle replication is not supported. ++ Circular replication is not supported. + Master and slave must set `use_replication` to true to support replication.
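As a closing illustration of the `wait_max_slave_acks` option added above, here is a small, hypothetical Go sketch of the quorum computation that `publishNewLog` performs; the helper name and the sample numbers are invented for the example.

```go
package main

import "fmt"

// requiredAcks mirrors the logic added to publishNewLog: the master waits
// for a majority of connected slaves, (n + 1) / 2, and caps that number at
// wait_max_slave_acks when the option is set to a value greater than zero.
func requiredAcks(slaveCount, waitMaxSlaveAcks int) int {
	total := (slaveCount + 1) / 2
	if waitMaxSlaveAcks > 0 && waitMaxSlaveAcks < total {
		total = waitMaxSlaveAcks
	}
	return total
}

func main() {
	fmt.Println(requiredAcks(1, 2)) // 1: the single slave must ack
	fmt.Println(requiredAcks(5, 2)) // 2: capped by wait_max_slave_acks
	fmt.Println(requiredAcks(5, 0)) // 3: plain majority when the cap is 0
}
```

Whatever quorum is computed, the wait is still bounded by `wait_sync_time` seconds, after which the master logs "replication wait timeout" and continues.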