Merge branch 'develop'

siddontang 2014-08-08 15:26:32 +08:00
commit cd14eaa879
40 changed files with 568 additions and 623 deletions

View File

@ -66,13 +66,11 @@ Choosing a store database to use is very simple, you have two ways:
+ Set in server config file
"db" : {
"name" : "leveldb"
}
db_name = "leveldb"
+ Set in command flag
ledis-server -config=/etc/ledis.json -db_name=leveldb
ledis-server -config=/etc/ledis.conf -db_name=leveldb
Values set via command flags will overwrite those set in the config file.
@ -80,12 +78,18 @@ Choosing a store database to use is very simple, you have two ways:
You must know that changing the store database at runtime is very dangerous; LedisDB will not guarantee data validity if you do it.
## Configuration
LedisDB uses [toml](https://github.com/toml-lang/toml) as the preferred configuration format, and also supports ```json``` for historical reasons. The basic configuration ```./etc/ledis.conf``` in the LedisDB source may help you.
If you don't supply a configuration, LedisDB will use the default one.
## Server Example
//set run environment if not
source dev.sh
ledis-server -config=/etc/ledis.json
ledis-server -config=/etc/ledis.conf
//another shell
ledis-cli -p 6380
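A minimal Go sketch of the precedence described above, mirroring the cmd/ledis-server changes later in this commit: with no config file the defaults from config.NewConfigDefault are used, and a non-empty -db_name flag overwrites the db_name loaded from the config file.

```go
package main

import (
	"flag"

	"github.com/siddontang/ledisdb/config"
)

var configFile = flag.String("config", "", "ledisdb config file")
var dbName = flag.String("db_name", "", "overwrite the config's db name")

func main() {
	flag.Parse()

	var cfg *config.Config
	var err error

	if len(*configFile) == 0 {
		// no config file given: fall back to the built-in defaults
		cfg = config.NewConfigDefault()
	} else if cfg, err = config.NewConfigWithFile(*configFile); err != nil {
		println(err.Error())
		return
	}

	if len(*dbName) > 0 {
		// the command flag wins over whatever the config file set
		cfg.DBName = *dbName
	}

	println("using backend:", cfg.DBName)
}
```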

View File

@ -4,7 +4,6 @@
go get github.com/siddontang/go-log/log
go get github.com/siddontang/go-snappy/snappy
go get github.com/siddontang/copier
go get github.com/siddontang/goleveldb/leveldb
@ -14,4 +13,5 @@ go get github.com/boltdb/bolt
go get gopkg.in/mgo.v2/bson
go get github.com/ugorji/go/codec
go get github.com/BurntSushi/toml

View File

@ -1,14 +1,13 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/ledis"
"io/ioutil"
)
var configPath = flag.String("config", "/etc/ledis.json", "ledisdb config file")
var configPath = flag.String("config", "", "ledisdb config file")
var dumpPath = flag.String("dump_file", "", "ledisdb dump file")
func main() {
@ -19,7 +18,7 @@ func main() {
return
}
data, err := ioutil.ReadFile(*configPath)
cfg, err := config.NewConfigWithFile(*configPath)
if err != nil {
println(err.Error())
return
@ -30,24 +29,18 @@ func main() {
return
}
var cfg ledis.Config
if err = json.Unmarshal(data, &cfg); err != nil {
println(err.Error())
return
}
if len(cfg.DataDir) == 0 {
println("must set data dir")
return
}
ldb, err := ledis.Open(&cfg)
ldb, err := ledis.Open(cfg)
if err != nil {
println("ledis open error ", err.Error())
return
}
err = loadDump(&cfg, ldb)
err = loadDump(cfg, ldb)
ldb.Close()
if err != nil {
@ -58,7 +51,7 @@ func main() {
println("Load OK")
}
func loadDump(cfg *ledis.Config, ldb *ledis.Ledis) error {
func loadDump(cfg *config.Config, ldb *ledis.Ledis) error {
var err error
if err = ldb.FlushAll(); err != nil {
return err

View File

@ -1,14 +1,12 @@
package main
import (
"encoding/json"
"flag"
"github.com/siddontang/ledisdb/ledis"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store"
"io/ioutil"
)
var fileName = flag.String("config", "/etc/ledis.json", "ledisdb config file")
var fileName = flag.String("config", "", "ledisdb config file")
func main() {
flag.Parse()
@ -18,14 +16,9 @@ func main() {
return
}
data, err := ioutil.ReadFile(*fileName)
if err != nil {
println(err.Error())
return
}
cfg, err := config.NewConfigWithFile(*fileName)
var cfg ledis.Config
if err = json.Unmarshal(data, &cfg); err != nil {
if err != nil {
println(err.Error())
return
}
@ -35,7 +28,7 @@ func main() {
return
}
if err = store.Repair(cfg.NewDBConfig()); err != nil {
if err = store.Repair(cfg); err != nil {
println("repair error: ", err.Error())
}
}

View File

@ -2,6 +2,7 @@ package main
import (
"flag"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/server"
"log"
"net/http"
@ -12,7 +13,7 @@ import (
"syscall"
)
var configFile = flag.String("config", "/etc/ledis.json", "ledisdb config file")
var configFile = flag.String("config", "", "ledisdb config file")
var dbName = flag.String("db_name", "", "select a db to use, it will overwrite the config's db name")
func main() {
@ -20,19 +21,23 @@ func main() {
flag.Parse()
var cfg *config.Config
var err error
if len(*configFile) == 0 {
println("must use a config file")
return
println("no config set, using default config")
cfg = config.NewConfigDefault()
} else {
cfg, err = config.NewConfigWithFile(*configFile)
}
cfg, err := server.NewConfigWithFile(*configFile)
if err != nil {
println(err.Error())
return
}
if len(*dbName) > 0 {
cfg.DB.Name = *dbName
cfg.DBName = *dbName
}
var app *server.App

config/config.go (new file, 141 lines)
View File

@ -0,0 +1,141 @@
package config
import (
"encoding/json"
"github.com/BurntSushi/toml"
"io/ioutil"
)
type Size int
const (
DefaultAddr string = "127.0.0.1:6380"
DefaultHttpAddr string = "127.0.0.1:11181"
DefaultDBName string = "goleveldb"
DefaultDataDir string = "./var"
)
const (
MaxBinLogFileSize int = 1024 * 1024 * 1024
MaxBinLogFileNum int = 10000
DefaultBinLogFileSize int = MaxBinLogFileSize
DefaultBinLogFileNum int = 10
)
type LevelDBConfig struct {
Compression bool `toml:"compression" json:"compression"`
BlockSize int `toml:"block_size" json:"block_size"`
WriteBufferSize int `toml:"write_buffer_size" json:"write_buffer_size"`
CacheSize int `toml:"cache_size" json:"cache_size"`
MaxOpenFiles int `toml:"max_open_files" json:"max_open_files"`
}
type LMDBConfig struct {
MapSize int `toml:"map_size" json:"map_size"`
}
type BinLogConfig struct {
MaxFileSize int `toml:"max_file_size" json:"max_file_size"`
MaxFileNum int `toml:"max_file_num" json:"max_file_num"`
}
type Config struct {
Addr string `toml:"addr" json:"addr"`
HttpAddr string `toml:"http_addr" json:"http_addr"`
DataDir string `toml:"data_dir" json:"data_dir"`
DBName string `toml:"db_name" json:"db_name"`
LevelDB LevelDBConfig `toml:"leveldb" json:"leveldb"`
LMDB LMDBConfig `toml:"lmdb" json:"lmdb"`
BinLog BinLogConfig `toml:"binlog" json:"binlog"`
SlaveOf string `toml:"slaveof" json:"slaveof"`
AccessLog string `toml:"access_log" json:"access_log"`
}
func NewConfigWithFile(fileName string) (*Config, error) {
data, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
return NewConfigWithData(data)
}
func NewConfigWithData(data []byte) (*Config, error) {
cfg := NewConfigDefault()
_, err := toml.Decode(string(data), cfg)
if err != nil {
//try json
if err = json.Unmarshal(data, cfg); err != nil {
return nil, err
}
}
return cfg, nil
}
func NewConfigDefault() *Config {
cfg := new(Config)
cfg.Addr = DefaultAddr
cfg.HttpAddr = DefaultHttpAddr
cfg.DataDir = DefaultDataDir
cfg.DBName = DefaultDBName
// disable binlog
cfg.BinLog.MaxFileNum = 0
cfg.BinLog.MaxFileSize = 0
// disable replication
cfg.SlaveOf = ""
// disable access log
cfg.AccessLog = ""
return cfg
}
func (cfg *LevelDBConfig) Adjust() {
if cfg.CacheSize <= 0 {
cfg.CacheSize = 4 * 1024 * 1024
}
if cfg.BlockSize <= 0 {
cfg.BlockSize = 4 * 1024
}
if cfg.WriteBufferSize <= 0 {
cfg.WriteBufferSize = 4 * 1024 * 1024
}
if cfg.MaxOpenFiles < 1024 {
cfg.MaxOpenFiles = 1024
}
}
func (cfg *BinLogConfig) Adjust() {
if cfg.MaxFileSize <= 0 {
cfg.MaxFileSize = DefaultBinLogFileSize
} else if cfg.MaxFileSize > MaxBinLogFileSize {
cfg.MaxFileSize = MaxBinLogFileSize
}
if cfg.MaxFileNum <= 0 {
cfg.MaxFileNum = DefaultBinLogFileNum
} else if cfg.MaxFileNum > MaxBinLogFileNum {
cfg.MaxFileNum = MaxBinLogFileNum
}
}
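A short usage sketch for the new config package, using only the functions defined above: NewConfigWithData tries TOML first and falls back to JSON, and any field missing from the input keeps the value set by NewConfigDefault.

```go
package main

import (
	"fmt"

	"github.com/siddontang/ledisdb/config"
)

func main() {
	tomlData := []byte("addr = \"127.0.0.1:6380\"\ndb_name = \"leveldb\"")
	jsonData := []byte(`{"addr": "127.0.0.1:6380", "db_name": "leveldb"}`)

	for _, data := range [][]byte{tomlData, jsonData} {
		// TOML is decoded first; on failure the same bytes are retried as JSON.
		cfg, err := config.NewConfigWithData(data)
		if err != nil {
			panic(err)
		}
		// DataDir was not set in the input, so it keeps DefaultDataDir ("./var").
		fmt.Println(cfg.Addr, cfg.DBName, cfg.DataDir)
	}
}
```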

config/config.json (new file, 21 lines)
View File

@ -0,0 +1,21 @@
{
"addr": "127.0.0.1:6380",
"http_addr": "127.0.0.1:11181",
"data_dir": "/tmp/ledis_server",
"db_name" : "leveldb",
"leveldb": {
"compression": false,
"block_size": 32768,
"write_buffer_size": 67108864,
"cache_size": 524288000,
"max_open_files":1024
},
"lmdb" : {
"map_size" : 524288000
},
"access_log" : ""
}

config/config.toml (new file, 42 lines)
View File

@ -0,0 +1,42 @@
# LedisDB configuration
# Server listen address
addr = "127.0.0.1:6380"
# Server http listen address, set empty to disable
http_addr = "127.0.0.1:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"
# Log server commands, set empty to disable
access_log = ""
# Set slaveof to enable replication from a master, leave empty to disable replication
slaveof = ""
# Choose which backend storage to use; currently supported:
#
# leveldb
# rocksdb
# goleveldb
# lmdb
# boltdb
#
db_name = "leveldb"
[leveldb]
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
[lmdb]
map_size = 524288000
[binlog]
max_file_size = 0
max_file_num = 0

config/config_test.go (new file, 39 lines)
View File

@ -0,0 +1,39 @@
package config
import (
"reflect"
"testing"
)
func TestConfig(t *testing.T) {
dstCfg := new(Config)
dstCfg.Addr = "127.0.0.1:6380"
dstCfg.HttpAddr = "127.0.0.1:11181"
dstCfg.DataDir = "/tmp/ledis_server"
dstCfg.DBName = "leveldb"
dstCfg.LevelDB.Compression = false
dstCfg.LevelDB.BlockSize = 32768
dstCfg.LevelDB.WriteBufferSize = 67108864
dstCfg.LevelDB.CacheSize = 524288000
dstCfg.LevelDB.MaxOpenFiles = 1024
dstCfg.LMDB.MapSize = 524288000
cfg, err := NewConfigWithFile("./config.toml")
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(dstCfg, cfg) {
t.Fatal("parse toml error")
}
cfg, err = NewConfigWithFile("./config.json")
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(dstCfg, cfg) {
t.Fatal("parse json error")
}
}

etc/ledis.conf (new file, 45 lines)
View File

@ -0,0 +1,45 @@
# LedisDB configuration
# Config format is toml, https://github.com/toml-lang/toml
# Server listen address
addr = "127.0.0.1:6380"
# Server http listen address, set empty to disable
http_addr = "127.0.0.1:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"
# Log server commands, set empty to disable
access_log = ""
# Set slaveof to enable replication from a master, leave empty to disable replication
slaveof = ""
# Choose which backend storage to use; currently supported:
#
# leveldb
# rocksdb
# goleveldb
# lmdb
# boltdb
#
db_name = "leveldb"
[leveldb]
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
[lmdb]
map_size = 524288000
[binlog]
# Set either size or num to 0 to disable binlog
max_file_size = 0
max_file_num = 0

View File

@ -1,18 +0,0 @@
{
"addr": "127.0.0.1:6380",
"http_addr": "127.0.0.1:11181",
"data_dir": "/tmp/ledis_server",
"db": {
"name" : "leveldb",
"compression": false,
"block_size": 32768,
"write_buffer_size": 67108864,
"cache_size": 524288000,
"max_open_files":1024,
"map_size" : 524288000
},
"access_log" : "access.log"
}

View File

@ -3,9 +3,9 @@ package ledis
import (
"bufio"
"encoding/binary"
"encoding/json"
"fmt"
"github.com/siddontang/go-log/log"
"github.com/siddontang/ledisdb/config"
"io/ioutil"
"os"
"path"
@ -14,14 +14,6 @@ import (
"time"
)
const (
MaxBinLogFileSize int = 1024 * 1024 * 1024
MaxBinLogFileNum int = 10000
DefaultBinLogFileSize int = MaxBinLogFileSize
DefaultBinLogFileNum int = 10
)
/*
index file format:
ledis-bin.00001
@ -34,28 +26,10 @@ timestamp(bigendian uint32, seconds)|PayloadLen(bigendian uint32)|PayloadData
*/
type BinLogConfig struct {
Path string `json:"path"`
MaxFileSize int `json:"max_file_size"`
MaxFileNum int `json:"max_file_num"`
}
func (cfg *BinLogConfig) adjust() {
if cfg.MaxFileSize <= 0 {
cfg.MaxFileSize = DefaultBinLogFileSize
} else if cfg.MaxFileSize > MaxBinLogFileSize {
cfg.MaxFileSize = MaxBinLogFileSize
}
if cfg.MaxFileNum <= 0 {
cfg.MaxFileNum = DefaultBinLogFileNum
} else if cfg.MaxFileNum > MaxBinLogFileNum {
cfg.MaxFileNum = MaxBinLogFileNum
}
}
type BinLog struct {
cfg *BinLogConfig
path string
cfg *config.BinLogConfig
logFile *os.File
@ -66,24 +40,15 @@ type BinLog struct {
lastLogIndex int64
}
func NewBinLogWithJsonConfig(data json.RawMessage) (*BinLog, error) {
var cfg BinLogConfig
if err := json.Unmarshal(data, &cfg); err != nil {
return nil, err
}
return NewBinLog(&cfg)
}
func NewBinLog(cfg *BinLogConfig) (*BinLog, error) {
cfg.adjust()
func NewBinLog(cfg *config.Config) (*BinLog, error) {
l := new(BinLog)
l.cfg = cfg
l.cfg = &cfg.BinLog
l.cfg.Adjust()
if err := os.MkdirAll(cfg.Path, os.ModePerm); err != nil {
l.path = path.Join(cfg.DataDir, "bin_log")
if err := os.MkdirAll(l.path, os.ModePerm); err != nil {
return nil, err
}
@ -123,7 +88,7 @@ func (l *BinLog) flushIndex() error {
}
func (l *BinLog) loadIndex() error {
l.indexName = path.Join(l.cfg.Path, fmt.Sprintf("ledis-bin.index"))
l.indexName = path.Join(l.path, fmt.Sprintf("ledis-bin.index"))
if _, err := os.Stat(l.indexName); os.IsNotExist(err) {
//no index file, nothing to do
} else {
@ -139,7 +104,7 @@ func (l *BinLog) loadIndex() error {
continue
}
if _, err := os.Stat(path.Join(l.cfg.Path, line)); err != nil {
if _, err := os.Stat(path.Join(l.path, line)); err != nil {
log.Error("load index line %s error %s", line, err.Error())
return err
} else {
@ -180,7 +145,7 @@ func (l *BinLog) openNewLogFile() error {
var err error
lastName := l.getLogFile()
logPath := path.Join(l.cfg.Path, lastName)
logPath := path.Join(l.path, lastName)
if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil {
log.Error("open new logfile error %s", err.Error())
return err
@ -224,7 +189,7 @@ func (l *BinLog) checkLogFileSize() bool {
func (l *BinLog) purge(n int) {
for i := 0; i < n; i++ {
logPath := path.Join(l.cfg.Path, l.logNames[i])
logPath := path.Join(l.path, l.logNames[i])
os.Remove(logPath)
}
@ -265,11 +230,11 @@ func (l *BinLog) FormatLogFileName(index int64) string {
}
func (l *BinLog) FormatLogFilePath(index int64) string {
return path.Join(l.cfg.Path, l.FormatLogFileName(index))
return path.Join(l.path, l.FormatLogFileName(index))
}
func (l *BinLog) LogPath() string {
return l.cfg.Path
return l.path
}
func (l *BinLog) Purge(n int) error {

View File

@ -1,19 +1,20 @@
package ledis
import (
"github.com/siddontang/ledisdb/config"
"io/ioutil"
"os"
"testing"
)
func TestBinLog(t *testing.T) {
cfg := new(BinLogConfig)
cfg := new(config.Config)
cfg.MaxFileNum = 1
cfg.MaxFileSize = 1024
cfg.Path = "/tmp/ledis_binlog"
cfg.BinLog.MaxFileNum = 1
cfg.BinLog.MaxFileSize = 1024
cfg.DataDir = "/tmp/ledis_binlog"
os.RemoveAll(cfg.Path)
os.RemoveAll(cfg.DataDir)
b, err := NewBinLog(cfg)
if err != nil {
@ -28,7 +29,7 @@ func TestBinLog(t *testing.T) {
t.Fatal(err)
}
if fs, err := ioutil.ReadDir(cfg.Path); err != nil {
if fs, err := ioutil.ReadDir(b.LogPath()); err != nil {
t.Fatal(err)
} else if len(fs) != 2 {
t.Fatal(len(fs))

View File

@ -1,54 +0,0 @@
package ledis
import (
"fmt"
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store"
"path"
"strings"
)
type Config struct {
DataDir string `json:"data_dir"`
DB struct {
Name string `json:"name"`
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
MapSize int `json:"map_size"`
} `json:"db"`
BinLog struct {
Use bool `json:"use"`
MaxFileSize int `json:"max_file_size"`
MaxFileNum int `json:"max_file_num"`
} `json:"binlog"`
}
func (cfg *Config) NewDBConfig() *store.Config {
if len(cfg.DB.Name) == 0 {
fmt.Printf("no store set, use default %s\n", store.DefaultStoreName)
cfg.DB.Name = store.DefaultStoreName
}
cfg.DB.Name = strings.ToLower(cfg.DB.Name)
dbCfg := new(store.Config)
copier.Copy(dbCfg, &cfg.DB)
dbPath := path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DB.Name))
dbCfg.Path = dbPath
return dbCfg
}
func (cfg *Config) NewBinLogConfig() *BinLogConfig {
binLogPath := path.Join(cfg.DataDir, "bin_log")
c := new(BinLogConfig)
copier.Copy(c, &cfg.BinLog)
c.Path = binLogPath
return c
}

View File

@ -2,46 +2,29 @@ package ledis
import (
"bytes"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store"
"os"
"testing"
)
func TestDump(t *testing.T) {
os.RemoveAll("/tmp/test_ledis_master")
os.RemoveAll("/tmp/test_ledis_slave")
cfgM := new(config.Config)
cfgM.DataDir = "/tmp/test_ledis_master"
var masterConfig = []byte(`
{
"data_dir" : "/tmp/test_ledis_master",
"data_db" : {
"compression":true,
"block_size" : 32768,
"write_buffer_size" : 2097152,
"cache_size" : 20971520
}
}
`)
os.RemoveAll(cfgM.DataDir)
master, err := OpenWithJsonConfig(masterConfig)
master, err := Open(cfgM)
if err != nil {
t.Fatal(err)
}
var slaveConfig = []byte(`
{
"data_dir" : "/tmp/test_ledis_slave",
"data_db" : {
"compression":true,
"block_size" : 32768,
"write_buffer_size" : 2097152,
"cache_size" : 20971520
}
}
`)
cfgS := new(config.Config)
cfgS.DataDir = "/tmp/test_ledis_slave"
os.RemoveAll(cfgM.DataDir)
var slave *Ledis
if slave, err = OpenWithJsonConfig(slaveConfig); err != nil {
if slave, err = Open(cfgS); err != nil {
t.Fatal(err)
}

View File

@ -1,9 +1,9 @@
package ledis
import (
"encoding/json"
"fmt"
"github.com/siddontang/go-log/log"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store"
"sync"
"time"
@ -26,7 +26,7 @@ type DB struct {
type Ledis struct {
sync.Mutex
cfg *Config
cfg *config.Config
ldb *store.DB
dbs [MaxDBNumber]*DB
@ -37,22 +37,13 @@ type Ledis struct {
jobs *sync.WaitGroup
}
func OpenWithJsonConfig(configJson json.RawMessage) (*Ledis, error) {
var cfg Config
if err := json.Unmarshal(configJson, &cfg); err != nil {
return nil, err
}
return Open(&cfg)
}
func Open(cfg *Config) (*Ledis, error) {
func Open(cfg *config.Config) (*Ledis, error) {
if len(cfg.DataDir) == 0 {
return nil, fmt.Errorf("must set correct data_dir")
fmt.Printf("no datadir set, use default %s\n", config.DefaultDataDir)
cfg.DataDir = config.DefaultDataDir
}
ldb, err := store.Open(cfg.NewDBConfig())
ldb, err := store.Open(cfg)
if err != nil {
return nil, err
}
@ -64,9 +55,9 @@ func Open(cfg *Config) (*Ledis, error) {
l.ldb = ldb
if cfg.BinLog.Use {
if cfg.BinLog.MaxFileNum > 0 && cfg.BinLog.MaxFileSize > 0 {
println("binlog will be refactored later, use your own risk!!!")
l.binlog, err = NewBinLog(cfg.NewBinLogConfig())
l.binlog, err = NewBinLog(cfg)
if err != nil {
return nil, err
}
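A hedged sketch of opening LedisDB against the refactored config, reflecting the behaviour in this hunk: an empty DataDir falls back to config.DefaultDataDir, and the binlog is only created when both BinLog.MaxFileNum and BinLog.MaxFileSize are positive. FlushAll and Close are used as in the ledis-load tool earlier in this commit.

```go
package main

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/ledis"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/ledis_example"

	// both values must be > 0, otherwise the binlog stays disabled
	cfg.BinLog.MaxFileNum = 10
	cfg.BinLog.MaxFileSize = 50 * 1024 * 1024

	ldb, err := ledis.Open(cfg)
	if err != nil {
		println("ledis open error ", err.Error())
		return
	}
	defer ldb.Close()

	if err := ldb.FlushAll(); err != nil {
		println(err.Error())
	}
}
```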

View File

@ -1,6 +1,7 @@
package ledis
import (
"github.com/siddontang/ledisdb/config"
"os"
"sync"
"testing"
@ -11,27 +12,15 @@ var testLedisOnce sync.Once
func getTestDB() *DB {
f := func() {
var d = []byte(`
{
"data_dir" : "/tmp/test_ledis",
"db" : {
"compression":true,
"block_size" : 32768,
"write_buffer_size" : 2097152,
"cache_size" : 20971520
},
cfg := new(config.Config)
cfg.DataDir = "/tmp/test_ledis"
cfg.BinLog.MaxFileSize = 1073741824
cfg.BinLog.MaxFileNum = 3
"binlog" : {
"max_file_size" : 1073741824,
"max_file_num" : 3
}
}
`)
os.RemoveAll("/tmp/test_ledis")
os.RemoveAll(cfg.DataDir)
var err error
testLedis, err = OpenWithJsonConfig(d)
testLedis, err = Open(cfg)
if err != nil {
println(err.Error())
panic(err)

View File

@ -3,6 +3,7 @@ package ledis
import (
"bytes"
"fmt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store"
"os"
"path"
@ -30,26 +31,25 @@ func TestReplication(t *testing.T) {
var slave *Ledis
var err error
os.RemoveAll("/tmp/test_repl")
cfgM := new(config.Config)
cfgM.DataDir = "/tmp/test_repl/master"
master, err = OpenWithJsonConfig([]byte(`
{
"data_dir" : "/tmp/test_repl/master",
"binlog" : {
"use" : true,
"max_file_size" : 50
}
}
`))
cfgM.BinLog.MaxFileNum = 10
cfgM.BinLog.MaxFileSize = 50
os.RemoveAll(cfgM.DataDir)
master, err = Open(cfgM)
if err != nil {
t.Fatal(err)
}
slave, err = OpenWithJsonConfig([]byte(`
{
"data_dir" : "/tmp/test_repl/slave"
}
`))
cfgS := new(config.Config)
cfgS.DataDir = "/tmp/test_repl/slave"
os.RemoveAll(cfgS.DataDir)
slave, err = Open(cfgS)
if err != nil {
t.Fatal(err)
}
@ -64,7 +64,7 @@ func TestReplication(t *testing.T) {
db.HSet([]byte("c"), []byte("3"), []byte("value"))
for _, name := range master.binlog.LogNames() {
p := path.Join(master.binlog.cfg.Path, name)
p := path.Join(master.binlog.LogPath(), name)
err = slave.ReplicateFromBinLog(p)
if err != nil {

View File

@ -1,7 +1,7 @@
package server
import (
"fmt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/ledis"
"net"
"net/http"
@ -10,7 +10,7 @@ import (
)
type App struct {
cfg *Config
cfg *config.Config
listener net.Listener
httpListener net.Listener
@ -35,9 +35,10 @@ func netType(s string) string {
}
}
func NewApp(cfg *Config) (*App, error) {
func NewApp(cfg *config.Config) (*App, error) {
if len(cfg.DataDir) == 0 {
return nil, fmt.Errorf("must set data_dir first")
println("use default datadir %s", config.DefaultDataDir)
cfg.DataDir = config.DefaultDataDir
}
app := new(App)
@ -72,7 +73,7 @@ func NewApp(cfg *Config) (*App, error) {
}
}
if app.ldb, err = ledis.Open(cfg.NewLedisConfig()); err != nil {
if app.ldb, err = ledis.Open(cfg); err != nil {
return nil, err
}

View File

@ -2,6 +2,7 @@ package server
import (
"github.com/siddontang/ledisdb/client/go/ledis"
"github.com/siddontang/ledisdb/config"
"os"
"sync"
"testing"
@ -28,28 +29,15 @@ func startTestApp() {
f := func() {
newTestLedisClient()
cfg := new(config.Config)
cfg.DataDir = "/tmp/testdb"
os.RemoveAll(cfg.DataDir)
cfg.Addr = "127.0.0.1:16380"
os.RemoveAll("/tmp/testdb")
var d = []byte(`
{
"data_dir" : "/tmp/testdb",
"addr" : "127.0.0.1:16380",
"db" : {
"compression":true,
"block_size" : 32768,
"write_buffer_size" : 2097152,
"cache_size" : 20971520,
"max_open_files" : 1024
}
}
`)
cfg, err := NewConfig(d)
if err != nil {
println(err.Error())
panic(err)
}
var err error
testApp, err = NewApp(cfg)
if err != nil {
println(err.Error())

View File

@ -3,6 +3,7 @@ package server
import (
"bytes"
"fmt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store"
"os"
"testing"
@ -29,10 +30,11 @@ func TestReplication(t *testing.T) {
data_dir := "/tmp/test_replication"
os.RemoveAll(data_dir)
masterCfg := new(Config)
masterCfg := new(config.Config)
masterCfg.DataDir = fmt.Sprintf("%s/master", data_dir)
masterCfg.Addr = "127.0.0.1:11182"
masterCfg.BinLog.Use = true
masterCfg.BinLog.MaxFileSize = 1 * 1024 * 1024
masterCfg.BinLog.MaxFileNum = 10
var master *App
var slave *App
@ -42,7 +44,7 @@ func TestReplication(t *testing.T) {
t.Fatal(err)
}
slaveCfg := new(Config)
slaveCfg := new(config.Config)
slaveCfg.DataDir = fmt.Sprintf("%s/slave", data_dir)
slaveCfg.Addr = "127.0.0.1:11183"
slaveCfg.SlaveOf = masterCfg.Addr

View File

@ -1,69 +0,0 @@
package server
import (
"encoding/json"
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/ledis"
"io/ioutil"
)
type Config struct {
Addr string `json:"addr"`
HttpAddr string `json:"http_addr"`
DataDir string `json:"data_dir"`
DB struct {
Name string `json:"name"`
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
MapSize int `json:"map_size"`
} `json:"db"`
BinLog struct {
Use bool `json:"use"`
MaxFileSize int `json:"max_file_size"`
MaxFileNum int `json:"max_file_num"`
} `json:"binlog"`
//set slaveof to enable replication from master
//empty, no replication
SlaveOf string `json:"slaveof"`
AccessLog string `json:"access_log"`
}
func NewConfig(data json.RawMessage) (*Config, error) {
c := new(Config)
err := json.Unmarshal(data, c)
if err != nil {
return nil, err
}
return c, nil
}
func NewConfigWithFile(fileName string) (*Config, error) {
data, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
return NewConfig(data)
}
func (cfg *Config) NewLedisConfig() *ledis.Config {
c := new(ledis.Config)
c.DataDir = cfg.DataDir
copier.Copy(&c.DB, &cfg.DB)
copier.Copy(&c.BinLog, &cfg.BinLog)
return c
}

View File

@ -9,7 +9,7 @@
//
// Start a ledis server is very simple:
//
// cfg := new(Config)
// cfg := new(config.Config)
// cfg.Addr = "127.0.0.1:6380"
// cfg.DataDir = "/tmp/ledis"
// app := server.NewApp(cfg)
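The doc comment stops at NewApp; a fuller sketch of embedding the server might look like the following. Run and Close on *server.App are assumptions based on how cmd/ledis-server drives the app, they do not appear in this diff.

```go
package main

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/server"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.Addr = "127.0.0.1:6380"
	cfg.DataDir = "/tmp/ledis"

	app, err := server.NewApp(cfg)
	if err != nil {
		println(err.Error())
		return
	}

	// Run and Close are assumed from the server package, as used by cmd/ledis-server.
	defer app.Close()
	app.Run()
}
```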

View File

@ -3,30 +3,9 @@
package store
import (
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store/boltdb"
"github.com/siddontang/ledisdb/store/driver"
)
const BoltDBName = "boltdb"
type BoltDBStore struct {
}
func (s BoltDBStore) Open(cfg *Config) (driver.IDB, error) {
c := &boltdb.Config{}
copier.Copy(c, cfg)
return boltdb.Open(c)
}
func (s BoltDBStore) Repair(cfg *Config) error {
c := &boltdb.Config{}
copier.Copy(c, cfg)
return boltdb.Repair(c)
}
func init() {
Register(BoltDBName, BoltDBStore{})
Register(boltdb.Store{})
}

View File

@ -2,6 +2,7 @@ package boltdb
import (
"github.com/boltdb/bolt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
"os"
"path"
@ -9,28 +10,27 @@ import (
var bucketName = []byte("ledisdb")
type Config struct {
Path string `json:"path"`
NoSync bool `json:"nosync"`
type Store struct {
}
type DB struct {
cfg *Config
db *bolt.DB
func (s Store) String() string {
return "boltdb"
}
func Open(cfg *Config) (*DB, error) {
os.MkdirAll(cfg.Path, os.ModePerm)
name := path.Join(cfg.Path, "ledis_bolt.db")
func (s Store) Open(dbPath string, cfg *config.Config) (driver.IDB, error) {
os.MkdirAll(dbPath, os.ModePerm)
name := path.Join(dbPath, "ledis_bolt.db")
db := new(DB)
var err error
db.path = name
db.cfg = cfg
db.db, err = bolt.Open(name, 0600, nil)
if err != nil {
return nil, err
}
db.db.NoSync = cfg.NoSync
var tx *bolt.Tx
tx, err = db.db.Begin(true)
if err != nil {
@ -50,10 +50,16 @@ func Open(cfg *Config) (*DB, error) {
return db, nil
}
func Repair(cfg *Config) error {
func (s Store) Repair(path string, cfg *config.Config) error {
return nil
}
type DB struct {
cfg *config.Config
db *bolt.DB
path string
}
func (db *DB) Close() error {
return db.db.Close()
}

View File

@ -1,16 +1,17 @@
package store
import (
"github.com/siddontang/ledisdb/config"
"os"
"testing"
)
func newTestBoltDB() *DB {
cfg := new(Config)
cfg.Name = BoltDBName
cfg.Path = "/tmp/testdb/boltdb"
cfg := new(config.Config)
cfg.DBName = "boltdb"
cfg.DataDir = "/tmp/testdb"
os.RemoveAll(cfg.Path)
os.RemoveAll(getStorePath(cfg))
db, err := Open(cfg)
if err != nil {

View File

@ -1,20 +0,0 @@
package store
type Config struct {
Name string `json:"name"`
Path string `json:"path"`
//for leveldb, goleveldb
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
//for lmdb
MapSize int `json:"map_size"`
//for boltdb
NoSync bool `json:"nosync"`
}

View File

@ -1,30 +1,9 @@
package store
import (
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store/driver"
"github.com/siddontang/ledisdb/store/goleveldb"
)
const GoLevelDBName = "goleveldb"
type GoLevelDBStore struct {
}
func (s GoLevelDBStore) Open(cfg *Config) (driver.IDB, error) {
c := &goleveldb.Config{}
copier.Copy(c, cfg)
return goleveldb.Open(c)
}
func (s GoLevelDBStore) Repair(cfg *Config) error {
c := &goleveldb.Config{}
copier.Copy(c, cfg)
return goleveldb.Repair(c)
}
func init() {
Register(GoLevelDBName, GoLevelDBStore{})
Register(goleveldb.Store{})
}

View File

@ -5,24 +5,25 @@ import (
"github.com/siddontang/goleveldb/leveldb/cache"
"github.com/siddontang/goleveldb/leveldb/filter"
"github.com/siddontang/goleveldb/leveldb/opt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
"os"
)
const defaultFilterBits int = 10
type Config struct {
Path string `json:"path"`
type Store struct {
}
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
func (s Store) String() string {
return "goleveldb"
}
type DB struct {
cfg *Config
path string
cfg *config.LevelDBConfig
db *leveldb.DB
@ -35,13 +36,14 @@ type DB struct {
filter filter.Filter
}
func Open(cfg *Config) (*DB, error) {
if err := os.MkdirAll(cfg.Path, os.ModePerm); err != nil {
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return nil, err
}
db := new(DB)
db.cfg = cfg
db.path = path
db.cfg = &cfg.LevelDB
if err := db.open(); err != nil {
return nil, err
@ -50,8 +52,8 @@ func Open(cfg *Config) (*DB, error) {
return db, nil
}
func Repair(cfg *Config) error {
db, err := leveldb.RecoverFile(cfg.Path, newOptions(cfg))
func (s Store) Repair(path string, cfg *config.Config) error {
db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB))
if err != nil {
return err
}
@ -67,18 +69,18 @@ func (db *DB) open() error {
db.iteratorOpts.DontFillCache = true
var err error
db.db, err = leveldb.OpenFile(db.cfg.Path, db.opts)
db.db, err = leveldb.OpenFile(db.path, db.opts)
return err
}
func newOptions(cfg *Config) *opt.Options {
func newOptions(cfg *config.LevelDBConfig) *opt.Options {
opts := &opt.Options{}
opts.ErrorIfMissing = false
if cfg.CacheSize > 0 {
opts.BlockCache = cache.NewLRUCache(cfg.CacheSize)
}
cfg.Adjust()
opts.BlockCache = cache.NewLRUCache(cfg.CacheSize)
//we must use bloomfilter
opts.Filter = filter.NewBloomFilter(defaultFilterBits)
@ -89,13 +91,8 @@ func newOptions(cfg *Config) *opt.Options {
opts.Compression = opt.SnappyCompression
}
if cfg.BlockSize > 0 {
opts.BlockSize = cfg.BlockSize
}
if cfg.WriteBufferSize > 0 {
opts.WriteBuffer = cfg.WriteBufferSize
}
opts.BlockSize = cfg.BlockSize
opts.WriteBuffer = cfg.WriteBufferSize
return opts
}

View File

@ -1,16 +1,18 @@
package store
import (
"github.com/siddontang/ledisdb/config"
"os"
"testing"
)
func newTestGoLevelDB() *DB {
cfg := new(Config)
cfg.Name = GoLevelDBName
cfg.Path = "/tmp/testdb/goleveldb"
cfg := new(config.Config)
cfg.DBName = "goleveldb"
cfg.DataDir = "/tmp/testdb"
os.RemoveAll(cfg.Path)
os.RemoveAll(getStorePath(cfg))
db, err := Open(cfg)
if err != nil {

View File

@ -3,30 +3,9 @@
package store
import (
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store/driver"
"github.com/siddontang/ledisdb/store/leveldb"
)
const LevelDBName = "leveldb"
type LevelDBStore struct {
}
func (s LevelDBStore) Open(cfg *Config) (driver.IDB, error) {
c := &leveldb.Config{}
copier.Copy(c, cfg)
return leveldb.Open(c)
}
func (s LevelDBStore) Repair(cfg *Config) error {
c := &leveldb.Config{}
copier.Copy(c, cfg)
return leveldb.Repair(c)
}
func init() {
Register(LevelDBName, LevelDBStore{})
Register(leveldb.Store{})
}

View File

@ -11,6 +11,7 @@ package leveldb
import "C"
import (
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
"os"
"runtime"
@ -19,23 +20,21 @@ import (
const defaultFilterBits int = 10
type Config struct {
Path string `json:"path"`
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
type Store struct {
}
func Open(cfg *Config) (*DB, error) {
if err := os.MkdirAll(cfg.Path, os.ModePerm); err != nil {
func (s Store) String() string {
return "leveldb"
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return nil, err
}
db := new(DB)
db.cfg = cfg
db.path = path
db.cfg = &cfg.LevelDB
if err := db.open(); err != nil {
return nil, err
@ -44,9 +43,10 @@ func Open(cfg *Config) (*DB, error) {
return db, nil
}
func Repair(cfg *Config) error {
func (s Store) Repair(path string, cfg *config.Config) error {
db := new(DB)
db.cfg = cfg
db.cfg = &cfg.LevelDB
db.path = path
err := db.open()
defer db.Close()
@ -57,7 +57,7 @@ func Repair(cfg *Config) error {
}
var errStr *C.char
ldbname := C.CString(db.cfg.Path)
ldbname := C.CString(path)
defer C.leveldb_free(unsafe.Pointer(ldbname))
C.leveldb_repair_db(db.opts.Opt, ldbname, &errStr)
@ -68,7 +68,9 @@ func Repair(cfg *Config) error {
}
type DB struct {
cfg *Config
path string
cfg *config.LevelDBConfig
db *C.leveldb_t
@ -88,7 +90,7 @@ func (db *DB) open() error {
db.initOptions(db.cfg)
var errStr *C.char
ldbname := C.CString(db.cfg.Path)
ldbname := C.CString(db.path)
defer C.leveldb_free(unsafe.Pointer(ldbname))
db.db = C.leveldb_open(db.opts.Opt, ldbname, &errStr)
@ -99,14 +101,12 @@ func (db *DB) open() error {
return nil
}
func (db *DB) initOptions(cfg *Config) {
func (db *DB) initOptions(cfg *config.LevelDBConfig) {
opts := NewOptions()
opts.SetCreateIfMissing(true)
if cfg.CacheSize <= 0 {
cfg.CacheSize = 4 * 1024 * 1024
}
cfg.Adjust()
db.cache = NewLRUCache(cfg.CacheSize)
opts.SetCache(db.cache)
@ -121,22 +121,10 @@ func (db *DB) initOptions(cfg *Config) {
opts.SetCompression(SnappyCompression)
}
if cfg.BlockSize <= 0 {
cfg.BlockSize = 4 * 1024
}
opts.SetBlockSize(cfg.BlockSize)
if cfg.WriteBufferSize <= 0 {
cfg.WriteBufferSize = 4 * 1024 * 1024
}
opts.SetWriteBufferSize(cfg.WriteBufferSize)
if cfg.MaxOpenFiles < 1024 {
cfg.MaxOpenFiles = 1024
}
opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
db.opts = opts

View File

@ -3,16 +3,18 @@
package store
import (
"github.com/siddontang/ledisdb/config"
"os"
"testing"
)
func newTestLevelDB() *DB {
cfg := new(Config)
cfg.Name = LevelDBName
cfg.Path = "/tmp/testdb/leveldb"
cfg := new(config.Config)
cfg.DBName = "leveldb"
cfg.DataDir = "/tmp/testdb"
os.RemoveAll(cfg.Path)
os.RemoveAll(getStorePath(cfg))
db, err := Open(cfg)
if err != nil {

View File

@ -1,30 +1,9 @@
package store
import (
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store/driver"
"github.com/siddontang/ledisdb/store/mdb"
)
const LMDBName = "lmdb"
type LMDBStore struct {
}
func (s LMDBStore) Open(cfg *Config) (driver.IDB, error) {
c := &mdb.Config{}
copier.Copy(c, cfg)
return mdb.Open(c)
}
func (s LMDBStore) Repair(cfg *Config) error {
c := &mdb.Config{}
copier.Copy(c, cfg)
return mdb.Repair(c)
}
func init() {
Register(LMDBName, LMDBStore{})
Register(mdb.Store{})
}

View File

@ -1,26 +1,30 @@
package mdb
import (
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
mdb "github.com/szferi/gomdb"
"os"
)
type Config struct {
Path string `json:"path"`
MapSize int `json:"map_size"`
type Store struct {
}
func (s Store) String() string {
return "lmdb"
}
type MDB struct {
env *mdb.Env
db mdb.DBI
path string
cfg *config.Config
}
func Open(c *Config) (MDB, error) {
path := c.Path
if c.MapSize == 0 {
c.MapSize = 500 * 1024 * 1024
func (s Store) Open(path string, c *config.Config) (driver.IDB, error) {
mapSize := c.LMDB.MapSize
if mapSize <= 0 {
mapSize = 500 * 1024 * 1024
}
env, err := mdb.NewEnv()
@ -33,7 +37,7 @@ func Open(c *Config) (MDB, error) {
return MDB{}, err
}
if err := env.SetMapSize(uint64(c.MapSize)); err != nil {
if err := env.SetMapSize(uint64(mapSize)); err != nil {
return MDB{}, err
}
@ -72,7 +76,7 @@ func Open(c *Config) (MDB, error) {
return db, nil
}
func Repair(c *Config) error {
func (s Store) Repair(path string, c *config.Config) error {
println("llmd not supports repair")
return nil
}

View File

@ -1,18 +1,19 @@
package store
import (
"github.com/siddontang/ledisdb/config"
"os"
"testing"
)
func newTestLMDB() *DB {
cfg := new(Config)
cfg.Name = LMDBName
cfg.Path = "/tmp/testdb/lmdb"
cfg := new(config.Config)
cfg.DBName = "lmdb"
cfg.DataDir = "/tmp/testdb"
cfg.LMDB.MapSize = 10 * 1024 * 1024
cfg.MapSize = 20 * 1024 * 1024
os.RemoveAll(cfg.Path)
os.RemoveAll(getStorePath(cfg))
db, err := Open(cfg)
if err != nil {

View File

@ -3,30 +3,9 @@
package store
import (
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store/driver"
"github.com/siddontang/ledisdb/store/rocksdb"
)
const RocksDBName = "rocksdb"
type RocksDBStore struct {
}
func (s RocksDBStore) Open(cfg *Config) (driver.IDB, error) {
c := &rocksdb.Config{}
copier.Copy(c, cfg)
return rocksdb.Open(c)
}
func (s RocksDBStore) Repair(cfg *Config) error {
c := &rocksdb.Config{}
copier.Copy(c, cfg)
return rocksdb.Repair(c)
}
func init() {
Register(RocksDBName, RocksDBStore{})
Register(rocksdb.Store{})
}

View File

@ -12,6 +12,7 @@ package rocksdb
import "C"
import (
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
"os"
"runtime"
@ -20,23 +21,21 @@ import (
const defaultFilterBits int = 10
type Config struct {
Path string `json:"path"`
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
type Store struct {
}
func Open(cfg *Config) (*DB, error) {
if err := os.MkdirAll(cfg.Path, os.ModePerm); err != nil {
func (s Store) String() string {
return "rocksdb"
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return nil, err
}
db := new(DB)
db.cfg = cfg
db.path = path
db.cfg = &cfg.LevelDB
if err := db.open(); err != nil {
return nil, err
@ -45,9 +44,10 @@ func Open(cfg *Config) (*DB, error) {
return db, nil
}
func Repair(cfg *Config) error {
func (s Store) Repair(path string, cfg *config.Config) error {
db := new(DB)
db.cfg = cfg
db.path = path
db.cfg = &cfg.LevelDB
err := db.open()
defer db.Close()
@ -58,7 +58,7 @@ func Repair(cfg *Config) error {
}
var errStr *C.char
ldbname := C.CString(db.cfg.Path)
ldbname := C.CString(path)
defer C.free(unsafe.Pointer(ldbname))
C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr)
@ -69,7 +69,9 @@ func Repair(cfg *Config) error {
}
type DB struct {
cfg *Config
path string
cfg *config.LevelDBConfig
db *C.rocksdb_t
@ -91,7 +93,7 @@ func (db *DB) open() error {
db.initOptions(db.cfg)
var errStr *C.char
ldbname := C.CString(db.cfg.Path)
ldbname := C.CString(db.path)
defer C.free(unsafe.Pointer(ldbname))
db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr)
@ -102,14 +104,12 @@ func (db *DB) open() error {
return nil
}
func (db *DB) initOptions(cfg *Config) {
func (db *DB) initOptions(cfg *config.LevelDBConfig) {
opts := NewOptions()
opts.SetCreateIfMissing(true)
if cfg.CacheSize <= 0 {
cfg.CacheSize = 4 * 1024 * 1024
}
cfg.Adjust()
db.env = NewDefaultEnv()
db.env.SetBackgroundThreads(runtime.NumCPU() * 2)
@ -129,22 +129,10 @@ func (db *DB) initOptions(cfg *Config) {
opts.SetCompression(SnappyCompression)
}
if cfg.BlockSize <= 0 {
cfg.BlockSize = 4 * 1024
}
opts.SetBlockSize(cfg.BlockSize)
if cfg.WriteBufferSize <= 0 {
cfg.WriteBufferSize = 4 * 1024 * 1024
}
opts.SetWriteBufferSize(cfg.WriteBufferSize)
if cfg.MaxOpenFiles < 1024 {
cfg.MaxOpenFiles = 1024
}
opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
opts.SetMaxBackgroundCompactions(runtime.NumCPU()*2 - 1)

View File

@ -3,16 +3,18 @@
package store
import (
"github.com/siddontang/ledisdb/config"
"os"
"testing"
)
func newTestRocksDB() *DB {
cfg := new(Config)
cfg.Name = RocksDBName
cfg.Path = "/tmp/testdb/rocksdb"
cfg := new(config.Config)
cfg.DBName = "rocksdb"
cfg.DataDir = "/tmp/testdb"
os.RemoveAll(cfg.Path)
os.RemoveAll(getStorePath(cfg))
db, err := Open(cfg)
if err != nil {

View File

@ -2,25 +2,29 @@ package store
import (
"fmt"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
"os"
"path"
)
const DefaultStoreName = "goleveldb"
type Config config.Config
type Store interface {
Open(cfg *Config) (driver.IDB, error)
Repair(cfg *Config) error
String() string
Open(path string, cfg *config.Config) (driver.IDB, error)
Repair(path string, cfg *config.Config) error
}
var dbs = map[string]Store{}
func Register(name string, store Store) {
func Register(s Store) {
name := s.String()
if _, ok := dbs[name]; ok {
panic(fmt.Errorf("store %s is registered", name))
panic(fmt.Errorf("store %s is registered", s))
}
dbs[name] = store
dbs[name] = s
}
func ListStores() []string {
@ -32,21 +36,36 @@ func ListStores() []string {
return s
}
func Open(cfg *Config) (*DB, error) {
if len(cfg.Name) == 0 {
cfg.Name = DefaultStoreName
func getStore(cfg *config.Config) (Store, error) {
if len(cfg.DBName) == 0 {
cfg.DBName = config.DefaultDBName
}
s, ok := dbs[cfg.Name]
s, ok := dbs[cfg.DBName]
if !ok {
return nil, fmt.Errorf("store %s is not registered", cfg.Name)
return nil, fmt.Errorf("store %s is not registered", cfg.DBName)
}
if err := os.MkdirAll(cfg.Path, os.ModePerm); err != nil {
return s, nil
}
func getStorePath(cfg *config.Config) string {
return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
}
func Open(cfg *config.Config) (*DB, error) {
s, err := getStore(cfg)
if err != nil {
return nil, err
}
idb, err := s.Open(cfg)
path := getStorePath(cfg)
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return nil, err
}
idb, err := s.Open(path, cfg)
if err != nil {
return nil, err
}
@ -56,15 +75,13 @@ func Open(cfg *Config) (*DB, error) {
return db, nil
}
func Repair(cfg *Config) error {
if len(cfg.Name) == 0 {
cfg.Name = DefaultStoreName
func Repair(cfg *config.Config) error {
s, err := getStore(cfg)
if err != nil {
return err
}
s, ok := dbs[cfg.Name]
if !ok {
return fmt.Errorf("db %s is not registered", cfg.Name)
}
path := getStorePath(cfg)
return s.Repair(cfg)
return s.Repair(path, cfg)
}
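To tie the refactor together, a hedged sketch of how a caller now opens a backend through the store package: the backend is selected purely by cfg.DBName, the data directory is derived by getStorePath (<data_dir>/<db_name>_data), and registered drivers can be listed with ListStores. The Close call on the returned *DB is assumed to forward to the underlying driver.

```go
package main

import (
	"fmt"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

func main() {
	// prints whatever backends were compiled in and registered via init()
	fmt.Println("registered stores:", store.ListStores())

	cfg := config.NewConfigDefault()
	cfg.DBName = "goleveldb"
	cfg.DataDir = "/tmp/store_example"

	// data is created under /tmp/store_example/goleveldb_data
	db, err := store.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```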