add rocksdb support

siddontang 2014-07-26 18:39:54 +08:00
parent dc5c2d9095
commit 6279eeadf8
19 changed files with 817 additions and 31 deletions

View File

@@ -7,7 +7,7 @@ include build_config.mk
 all: build
 build:
-	go install -tags $(GO_BUILD_TAGS) ./...
+	go install -tags '$(GO_BUILD_TAGS)' ./...
 clean:
 	go clean -i ./...

View File

@@ -30,7 +30,7 @@ Create a workspace and checkout ledisdb source
 ## LevelDB support
-+ Install leveldb and snappy, if you have installed, skip.
++ Install leveldb and snappy.
 LedisDB supplies a simple shell to install leveldb and snappy:
@@ -42,15 +42,43 @@ Create a workspace and checkout ledisdb source
 + Set LEVELDB_DIR and SNAPPY_DIR to the actual install path in dev.sh.
-+ make
++ ```make```
 ## RocksDB support
-todo.......
++ Install rocksdb and snappy first.
+LedisDB does not yet supply a simple install shell script for rocksdb; it may add one later.
++ Set ROCKSDB_DIR and SNAPPY_DIR to the actual install path in dev.sh.
++ ```make```
+## Choose store database
+LedisDB now supports goleveldb, lmdb, leveldb, and rocksdb; if you do not set one, goleveldb is chosen by default to store data.
+Choosing a store database is simple; there are two ways:
++ Set it in the server config file:
+    "db" : {
+        "name" : "leveldb"
+    }
++ Set it with a command flag:
+    ledis-server -config=/etc/ledis.json -db_name=leveldb
+The command flag overrides the setting in the config file.
+**Caveat**
+Be aware that changing the store database for existing data is very dangerous; LedisDB does not guarantee data validity if you do it.
 ## Server Example
-./ledis-server -config=/etc/ledis.json
+ledis-server -config=/etc/ledis.json
 //another shell
 ledis-cli -p 6380

View File

@@ -10,7 +10,7 @@ import (
 )
 var configFile = flag.String("config", "/etc/ledis.json", "ledisdb config file")
-var storeName = flag.String("store", "", "select a store to use, it will overwrite the config's store")
+var dbName = flag.String("db_name", "", "select a db to use, it will overwrite the config's db name")
 func main() {
 	runtime.GOMAXPROCS(runtime.NumCPU())
@@ -28,8 +28,8 @@ func main() {
 		return
 	}
-	if len(*storeName) > 0 {
-		cfg.DB.Name = *storeName
+	if len(*dbName) > 0 {
+		cfg.DB.Name = *dbName
 	}
 	var app *server.App

dev.sh
View File

@@ -1,10 +1,10 @@
 #!/bin/bash
-export VTTOP=$(pwd)
-export VTROOT="${VTROOT:-${VTTOP/\/src\/github.com\/siddontang\/ledisdb/}}"
+export LEDISTOP=$(pwd)
+export LEDISROOT="${LEDISROOT:-${LEDISTOP/\/src\/github.com\/siddontang\/ledisdb/}}"
-# VTTOP sanity check
-if [[ "$VTTOP" == "${VTTOP/\/src\/github.com\/siddontang\/ledisdb/}" ]]; then
-    echo "WARNING: VTTOP($VTTOP) does not contain src/github.com/siddontang/ledisdb"
+# LEDISTOP sanity check
+if [[ "$LEDISTOP" == "${LEDISTOP/\/src\/github.com\/siddontang\/ledisdb/}" ]]; then
+    echo "WARNING: LEDISTOP($LEDISTOP) does not contain src/github.com/siddontang/ledisdb"
     exit 1
 fi
@@ -12,7 +12,7 @@ fi
 #you may change yourself
 SNAPPY_DIR=/usr/local/snappy
 LEVELDB_DIR=/usr/local/leveldb
-ROCKSDB_DIR=
+ROCKSDB_DIR=/usr/local/rocksdb
 function add_path()
 {
@@ -25,9 +25,12 @@ function add_path()
     fi
 }
-GO_BUILD_TAGS=
-export GOPATH=$(add_path $GOPATH $VTROOT)
+export GOPATH=$(add_path $GOPATH $LEDISROOT)
+GO_BUILD_TAGS=
+CGO_CFLAGS=
+CGO_CXXFLAGS=
+CGO_LDFLAGS=
 # check snappy
 if [ -f $SNAPPY_DIR/lib/libsnappy.a ]; then
@@ -52,7 +55,7 @@ fi
 if [ -f $ROCKSDB_DIR/lib/librocksdb.a ]; then
     CGO_CFLAGS="$CGO_CFLAGS -I$ROCKSDB_DIR/include"
     CGO_CXXFLAGS="$CGO_CXXFLAGS -I$ROCKSDB_DIR/include"
-    CGO_LDFLAGS="$CGO_LDFLAGS -L$ROCKSDB_DIR/lib -lleveldb"
+    CGO_LDFLAGS="$CGO_LDFLAGS -L$ROCKSDB_DIR/lib -lrocksdb"
     LD_LIBRARY_PATH=$(add_path $LD_LIBRARY_PATH $ROCKSDB_DIR/lib)
     DYLD_LIBRARY_PATH=$(add_path $DYLD_LIBRARY_PATH $ROCKSDB_DIR/lib)
     GO_BUILD_TAGS="$GO_BUILD_TAGS rocksdb"

View File

@@ -5,6 +5,7 @@ import (
 	"github.com/siddontang/copier"
 	"github.com/siddontang/ledisdb/store"
 	"path"
+	"strings"
 )
 type Config struct {
@@ -33,6 +34,8 @@ func (cfg *Config) NewDBConfig() *store.Config {
 		cfg.DB.Name = store.DefaultStoreName
 	}
+	cfg.DB.Name = strings.ToLower(cfg.DB.Name)
 	dbCfg := new(store.Config)
 	copier.Copy(dbCfg, &cfg.DB)

View File

@@ -12,13 +12,13 @@ import (
 const defaultFilterBits int = 10
 type Config struct {
-	Path            string
-	Compression     bool
-
-	BlockSize       int
-	WriteBufferSize int
-	CacheSize       int
+	Path            string `json:"path"`
+	Compression     bool   `json:"compression"`
+	BlockSize       int    `json:"block_size"`
+	WriteBufferSize int    `json:"write_buffer_size"`
+	CacheSize       int    `json:"cache_size"`
+	MaxOpenFiles    int    `json:"max_open_files"`
 }
 type DB struct {

View File

@@ -19,13 +19,13 @@ import (
 const defaultFilterBits int = 10
 type Config struct {
-	Path            string
-	Compression     bool
-	BlockSize       int
-	WriteBufferSize int
-	CacheSize       int
-	MaxOpenFiles    int
+	Path            string `json:"path"`
+	Compression     bool   `json:"compression"`
+	BlockSize       int    `json:"block_size"`
+	WriteBufferSize int    `json:"write_buffer_size"`
+	CacheSize       int    `json:"cache_size"`
+	MaxOpenFiles    int    `json:"max_open_files"`
 }
 func Open(cfg *Config) (*DB, error) {
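The json tags added above let snake_case keys from a JSON config section map straight onto these store Config structs. A minimal sketch of that mapping, assuming only encoding/json; the struct below mirrors the tagged Config above rather than importing ledisdb, so it is illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the tagged Config shown above, for illustration only.
type Config struct {
	Path            string `json:"path"`
	Compression     bool   `json:"compression"`
	BlockSize       int    `json:"block_size"`
	WriteBufferSize int    `json:"write_buffer_size"`
	CacheSize       int    `json:"cache_size"`
	MaxOpenFiles    int    `json:"max_open_files"`
}

func main() {
	raw := []byte(`{"path": "/var/lib/ledis", "compression": true, "cache_size": 4194304}`)

	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Keys that are not set keep their zero values, which Open later replaces with defaults.
	fmt.Printf("%+v\n", cfg)
}
```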

View File

@@ -7,8 +7,8 @@ import (
 )
 type Config struct {
-	Path    string
-	MapSize int
+	Path    string `json:"path"`
+	MapSize int    `json:"map_size"`
 }
 type MDB struct {

store/rocksdb.go (new file, 32 lines)
View File

@@ -0,0 +1,32 @@
// +build rocksdb
package store
import (
"github.com/siddontang/copier"
"github.com/siddontang/ledisdb/store/driver"
"github.com/siddontang/ledisdb/store/rocksdb"
)
const RocksDBName = "rocksdb"
type RocksDBStore struct {
}
func (s RocksDBStore) Open(cfg *Config) (driver.IDB, error) {
c := &rocksdb.Config{}
copier.Copy(c, cfg)
return rocksdb.Open(c)
}
func (s RocksDBStore) Repair(cfg *Config) error {
c := &rocksdb.Config{}
copier.Copy(c, cfg)
return rocksdb.Repair(c)
}
func init() {
Register(RocksDBName, RocksDBStore{})
}
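store/rocksdb.go follows the same pattern as the other backends: the file is gated by the rocksdb build tag, and an init() call to Register makes the driver selectable by the configured store name. The sketch below is a self-contained illustration of that name-based registration pattern, not ledisdb's actual store API; Backend, Register, and openByName are hypothetical names.

```go
package main

import "fmt"

// Backend stands in for the real driver interface (driver.IDB in ledisdb).
type Backend interface {
	Name() string
}

var backends = map[string]Backend{}

// Register records a backend under a name, as each store's init() does.
func Register(name string, b Backend) {
	backends[name] = b
}

type fakeRocksDB struct{}

func (fakeRocksDB) Name() string { return "rocksdb" }

// A build-tag-gated file would normally do this in its init().
func init() {
	Register("rocksdb", fakeRocksDB{})
}

// openByName resolves the configured store name to a registered backend.
func openByName(name string) (Backend, error) {
	b, ok := backends[name]
	if !ok {
		return nil, fmt.Errorf("store %q is not registered (was the build tag set?)", name)
	}
	return b, nil
}

func main() {
	b, err := openByName("rocksdb")
	if err != nil {
		panic(err)
	}
	fmt.Println("using", b.Name())
}
```

Built without the rocksdb tag, the init() never runs, so a lookup by name fails cleanly; that is the point of pairing build tags with registration.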

store/rocksdb/batch.go (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
import (
"unsafe"
)
type WriteBatch struct {
db *DB
wbatch *C.rocksdb_writebatch_t
}
func (w *WriteBatch) Close() error {
C.rocksdb_writebatch_destroy(w.wbatch)
return nil
}
func (w *WriteBatch) Put(key, value []byte) {
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
}
func (w *WriteBatch) Delete(key []byte) {
C.rocksdb_writebatch_delete(w.wbatch,
(*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
func (w *WriteBatch) Commit() error {
return w.commit(w.db.writeOpts)
}
func (w *WriteBatch) Rollback() error {
C.rocksdb_writebatch_clear(w.wbatch)
return nil
}
func (w *WriteBatch) commit(wb *WriteOptions) error {
var errStr *C.char
C.rocksdb_write(w.db.db, wb.Opt, w.wbatch, &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
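A minimal usage sketch for the batch wrapper above. It assumes a build with the rocksdb tag and that the driver.IWriteBatch value returned by DB.NewWriteBatch (defined in db.go below) exposes the Put/Delete/Commit/Rollback methods implemented here; the path is arbitrary.

```go
// +build rocksdb

package main

import (
	"log"

	"github.com/siddontang/ledisdb/store/rocksdb"
)

func main() {
	db, err := rocksdb.Open(&rocksdb.Config{Path: "/tmp/rocksdb_batch_example"})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Batch several writes and apply them with a single rocksdb_write call.
	wb := db.NewWriteBatch()
	wb.Put([]byte("k1"), []byte("v1"))
	wb.Put([]byte("k2"), []byte("v2"))
	wb.Delete([]byte("stale"))

	if err := wb.Commit(); err != nil {
		// Rollback clears the pending batch so it can be reused.
		wb.Rollback()
		log.Fatal(err)
	}
}
```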

store/rocksdb/cache.go (new file, 20 lines)
View File

@@ -0,0 +1,20 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <stdint.h>
// #include "rocksdb/c.h"
import "C"
type Cache struct {
Cache *C.rocksdb_cache_t
}
func NewLRUCache(capacity int) *Cache {
return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))}
}
func (c *Cache) Close() {
C.rocksdb_cache_destroy(c.Cache)
}

store/rocksdb/db.go (new file, 279 lines)
View File

@@ -0,0 +1,279 @@
// +build rocksdb
// Package rocksdb is a wrapper for c++ rocksdb
package rocksdb
/*
#cgo LDFLAGS: -lrocksdb
#include <rocksdb/c.h>
#include <stdlib.h>
#include "rocksdb_ext.h"
*/
import "C"
import (
"github.com/siddontang/ledisdb/store/driver"
"os"
"runtime"
"unsafe"
)
const defaultFilterBits int = 10
type Config struct {
Path string `json:"path"`
Compression bool `json:"compression"`
BlockSize int `json:"block_size"`
WriteBufferSize int `json:"write_buffer_size"`
CacheSize int `json:"cache_size"`
MaxOpenFiles int `json:"max_open_files"`
}
func Open(cfg *Config) (*DB, error) {
if err := os.MkdirAll(cfg.Path, os.ModePerm); err != nil {
return nil, err
}
db := new(DB)
db.cfg = cfg
if err := db.open(); err != nil {
return nil, err
}
return db, nil
}
func Repair(cfg *Config) error {
db := new(DB)
db.cfg = cfg
err := db.open()
defer db.Close()
//open ok, do not need repair
if err == nil {
return nil
}
var errStr *C.char
ldbname := C.CString(db.cfg.Path)
defer C.free(unsafe.Pointer(ldbname))
C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
type DB struct {
cfg *Config
db *C.rocksdb_t
env *Env
opts *Options
//for default read and write options
readOpts *ReadOptions
writeOpts *WriteOptions
iteratorOpts *ReadOptions
cache *Cache
filter *FilterPolicy
}
func (db *DB) open() error {
db.initOptions(db.cfg)
var errStr *C.char
ldbname := C.CString(db.cfg.Path)
defer C.free(unsafe.Pointer(ldbname))
db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
db.db = nil
return saveError(errStr)
}
return nil
}
func (db *DB) initOptions(cfg *Config) {
opts := NewOptions()
opts.SetCreateIfMissing(true)
if cfg.CacheSize <= 0 {
cfg.CacheSize = 4 * 1024 * 1024
}
db.env = NewDefaultEnv()
db.env.SetBackgroundThreads(runtime.NumCPU() * 2)
db.env.SetHighPriorityBackgroundThreads(1)
opts.SetEnv(db.env)
db.cache = NewLRUCache(cfg.CacheSize)
opts.SetCache(db.cache)
//we must use bloomfilter
db.filter = NewBloomFilter(defaultFilterBits)
opts.SetFilterPolicy(db.filter)
if !cfg.Compression {
opts.SetCompression(NoCompression)
} else {
opts.SetCompression(SnappyCompression)
}
if cfg.BlockSize <= 0 {
cfg.BlockSize = 4 * 1024
}
opts.SetBlockSize(cfg.BlockSize)
if cfg.WriteBufferSize <= 0 {
cfg.WriteBufferSize = 4 * 1024 * 1024
}
opts.SetWriteBufferSize(cfg.WriteBufferSize)
if cfg.MaxOpenFiles < 1024 {
cfg.MaxOpenFiles = 1024
}
opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
opts.SetMaxBackgroundCompactions(runtime.NumCPU()*2 - 1)
opts.SetMaxBackgroundFlushes(1)
opts.SetLevel0SlowdownWritesTrigger(16)
opts.SetLevel0StopWritesTrigger(64)
opts.SetTargetFileSizeBase(32 * 1024 * 1024)
db.opts = opts
db.readOpts = NewReadOptions()
db.writeOpts = NewWriteOptions()
db.iteratorOpts = NewReadOptions()
db.iteratorOpts.SetFillCache(false)
}
func (db *DB) Close() error {
if db.db != nil {
C.rocksdb_close(db.db)
db.db = nil
}
db.opts.Close()
if db.cache != nil {
db.cache.Close()
}
if db.filter != nil {
db.filter.Close()
}
if db.env != nil {
db.env.Close()
}
db.readOpts.Close()
db.writeOpts.Close()
db.iteratorOpts.Close()
return nil
}
func (db *DB) Put(key, value []byte) error {
return db.put(db.writeOpts, key, value)
}
func (db *DB) Get(key []byte) ([]byte, error) {
return db.get(db.readOpts, key)
}
func (db *DB) Delete(key []byte) error {
return db.delete(db.writeOpts, key)
}
func (db *DB) NewWriteBatch() driver.IWriteBatch {
wb := &WriteBatch{
db: db,
wbatch: C.rocksdb_writebatch_create(),
}
return wb
}
func (db *DB) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.rocksdb_create_iterator(db.db, db.iteratorOpts.Opt)
return it
}
func (db *DB) put(wo *WriteOptions, key, value []byte) error {
var errStr *C.char
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.rocksdb_put(
db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.rocksdb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
defer C.free(unsafe.Pointer(value))
return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}
func (db *DB) delete(wo *WriteOptions, key []byte) error {
var errStr *C.char
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
C.rocksdb_delete(
db.db, wo.Opt, k, C.size_t(len(key)), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
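A short usage sketch for the DB wrapper above, again assuming a build with the rocksdb tag. Zero values in Config are replaced by the defaults initOptions fills in (4MB cache, 4KB blocks, 4MB write buffer, at least 1024 open files); the path is arbitrary.

```go
// +build rocksdb

package main

import (
	"fmt"
	"log"

	"github.com/siddontang/ledisdb/store/rocksdb"
)

func main() {
	// Compression defaults to false, so set it explicitly to use snappy.
	cfg := &rocksdb.Config{Path: "/tmp/rocksdb_db_example", Compression: true}

	db, err := rocksdb.Open(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("hello"), []byte("world")); err != nil {
		log.Fatal(err)
	}

	v, err := db.Get([]byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("hello => %s\n", v) // Get returns nil (not an error) for a missing key

	if err := db.Delete([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```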

store/rocksdb/env.go (new file, 27 lines)
View File

@@ -0,0 +1,27 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
type Env struct {
Env *C.rocksdb_env_t
}
func NewDefaultEnv() *Env {
return &Env{C.rocksdb_create_default_env()}
}
func (env *Env) SetHighPriorityBackgroundThreads(n int) {
C.rocksdb_env_set_high_priority_background_threads(env.Env, C.int(n))
}
func (env *Env) SetBackgroundThreads(n int) {
C.rocksdb_env_set_background_threads(env.Env, C.int(n))
}
func (env *Env) Close() {
C.rocksdb_env_destroy(env.Env)
}

View File

@@ -0,0 +1,21 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
type FilterPolicy struct {
Policy *C.rocksdb_filterpolicy_t
}
func NewBloomFilter(bitsPerKey int) *FilterPolicy {
policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))
return &FilterPolicy{policy}
}
func (fp *FilterPolicy) Close() {
C.rocksdb_filterpolicy_destroy(fp.Policy)
}

store/rocksdb/iterator.go (new file, 70 lines)
View File

@@ -0,0 +1,70 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"
import (
"unsafe"
)
type Iterator struct {
it *C.rocksdb_iterator_t
isValid C.uchar
}
func (it *Iterator) Key() []byte {
var klen C.size_t
kdata := C.rocksdb_iter_key(it.it, &klen)
if kdata == nil {
return nil
}
return slice(unsafe.Pointer(kdata), int(C.int(klen)))
}
func (it *Iterator) Value() []byte {
var vlen C.size_t
vdata := C.rocksdb_iter_value(it.it, &vlen)
if vdata == nil {
return nil
}
return slice(unsafe.Pointer(vdata), int(C.int(vlen)))
}
func (it *Iterator) Close() error {
if it.it != nil {
C.rocksdb_iter_destroy(it.it)
it.it = nil
}
return nil
}
func (it *Iterator) Valid() bool {
return ucharToBool(it.isValid)
}
func (it *Iterator) Next() {
it.isValid = C.rocksdb_iter_next_ext(it.it)
}
func (it *Iterator) Prev() {
it.isValid = C.rocksdb_iter_prev_ext(it.it)
}
func (it *Iterator) First() {
it.isValid = C.rocksdb_iter_seek_to_first_ext(it.it)
}
func (it *Iterator) Last() {
it.isValid = C.rocksdb_iter_seek_to_last_ext(it.it)
}
func (it *Iterator) Seek(key []byte) {
it.isValid = C.rocksdb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
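A sketch of a full forward scan with the iterator wrapper above. It assumes the driver.IIterator returned by DB.NewIterator exposes the First/Valid/Next/Key/Value/Close methods implemented here; note that Seek dereferences key[0], so it should only be called with a non-empty key.

```go
// +build rocksdb

package example

import (
	"fmt"

	"github.com/siddontang/ledisdb/store/rocksdb"
)

// dumpAll walks every key/value pair in forward order.
func dumpAll(db *rocksdb.DB) {
	it := db.NewIterator()
	defer it.Close()

	for it.First(); it.Valid(); it.Next() {
		// Key and Value alias rocksdb's internal buffers; copy them if they
		// must outlive the next iterator move.
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
}
```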

store/rocksdb/options.go (new file, 138 lines)
View File

@@ -0,0 +1,138 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
type CompressionOpt int
const (
NoCompression = CompressionOpt(0)
SnappyCompression = CompressionOpt(1)
)
type Options struct {
Opt *C.rocksdb_options_t
}
type ReadOptions struct {
Opt *C.rocksdb_readoptions_t
}
type WriteOptions struct {
Opt *C.rocksdb_writeoptions_t
}
func NewOptions() *Options {
opt := C.rocksdb_options_create()
return &Options{opt}
}
func NewReadOptions() *ReadOptions {
opt := C.rocksdb_readoptions_create()
return &ReadOptions{opt}
}
func NewWriteOptions() *WriteOptions {
opt := C.rocksdb_writeoptions_create()
return &WriteOptions{opt}
}
func (o *Options) Close() {
C.rocksdb_options_destroy(o.Opt)
}
func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) {
C.rocksdb_options_set_comparator(o.Opt, cmp)
}
func (o *Options) SetErrorIfExists(error_if_exists bool) {
eie := boolToUchar(error_if_exists)
C.rocksdb_options_set_error_if_exists(o.Opt, eie)
}
func (o *Options) SetCache(cache *Cache) {
C.rocksdb_options_set_cache(o.Opt, cache.Cache)
}
func (o *Options) SetEnv(env *Env) {
C.rocksdb_options_set_env(o.Opt, env.Env)
}
func (o *Options) SetWriteBufferSize(s int) {
C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}
func (o *Options) SetParanoidChecks(pc bool) {
C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}
func (o *Options) SetMaxOpenFiles(n int) {
C.rocksdb_options_set_max_open_files(o.Opt, C.int(n))
}
func (o *Options) SetBlockSize(s int) {
C.rocksdb_options_set_block_size(o.Opt, C.size_t(s))
}
func (o *Options) SetBlockRestartInterval(n int) {
C.rocksdb_options_set_block_restart_interval(o.Opt, C.int(n))
}
func (o *Options) SetCompression(t CompressionOpt) {
C.rocksdb_options_set_compression(o.Opt, C.int(t))
}
func (o *Options) SetCreateIfMissing(b bool) {
C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}
func (o *Options) SetFilterPolicy(fp *FilterPolicy) {
var policy *C.rocksdb_filterpolicy_t
if fp != nil {
policy = fp.Policy
}
C.rocksdb_options_set_filter_policy(o.Opt, policy)
}
func (o *Options) SetMaxBackgroundCompactions(n int) {
C.rocksdb_options_set_max_background_compactions(o.Opt, C.int(n))
}
func (o *Options) SetMaxBackgroundFlushes(n int) {
C.rocksdb_options_set_max_background_flushes(o.Opt, C.int(n))
}
func (o *Options) SetLevel0SlowdownWritesTrigger(n int) {
C.rocksdb_options_set_level0_slowdown_writes_trigger(o.Opt, C.int(n))
}
func (o *Options) SetLevel0StopWritesTrigger(n int) {
C.rocksdb_options_set_level0_stop_writes_trigger(o.Opt, C.int(n))
}
func (o *Options) SetTargetFileSizeBase(n int) {
C.rocksdb_options_set_target_file_size_base(o.Opt, C.uint64_t(uint64(n)))
}
func (ro *ReadOptions) Close() {
C.rocksdb_readoptions_destroy(ro.Opt)
}
func (ro *ReadOptions) SetVerifyChecksums(b bool) {
C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetFillCache(b bool) {
C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}
func (wo *WriteOptions) Close() {
C.rocksdb_writeoptions_destroy(wo.Opt)
}
func (wo *WriteOptions) SetSync(b bool) {
C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}
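The option, read-option, and write-option wrappers above are consumed internally by DB.initOptions rather than by callers of Open, but they map one-to-one onto the C API. A small standalone sketch of using them directly, for experimentation only (it does not go through the Config path):

```go
// +build rocksdb

package main

import "github.com/siddontang/ledisdb/store/rocksdb"

func main() {
	opts := rocksdb.NewOptions()
	defer opts.Close()

	opts.SetCreateIfMissing(true)
	opts.SetCompression(rocksdb.SnappyCompression)
	opts.SetWriteBufferSize(64 * 1024 * 1024)
	opts.SetMaxOpenFiles(1024)
	opts.SetMaxBackgroundCompactions(4)

	wo := rocksdb.NewWriteOptions()
	defer wo.Close()
	wo.SetSync(true) // sync every write; safer but much slower
}
```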

View File

@@ -0,0 +1,36 @@
// +build rocksdb
#include "rocksdb_ext.h"
#include <stdlib.h>
#include <string>
extern "C" {
unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_seek_to_first(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_seek_to_last(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t* iter, const char* k, size_t klen) {
rocksdb_iter_seek(iter, k, klen);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_next(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_prev(iter);
return rocksdb_iter_valid(iter);
}
}

View File

@@ -0,0 +1,24 @@
// +build rocksdb
#ifndef ROCKSDB_EXT_H
#define ROCKSDB_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "rocksdb/c.h"
// Below iterator functions like rocksdb iterator but returns valid status for iterator
extern unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t*, const char* k, size_t klen);
extern unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t*);
#ifdef __cplusplus
}
#endif
#endif

store/rocksdb/util.go (new file, 46 lines)
View File

@@ -0,0 +1,46 @@
// +build rocksdb
package rocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
func boolToUchar(b bool) C.uchar {
uc := C.uchar(0)
if b {
uc = C.uchar(1)
}
return uc
}
func ucharToBool(uc C.uchar) bool {
if uc == C.uchar(0) {
return false
}
return true
}
func saveError(errStr *C.char) error {
if errStr != nil {
gs := C.GoString(errStr)
C.free(unsafe.Pointer(errStr))
return fmt.Errorf(gs)
}
return nil
}
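// Note: slice below builds a []byte header that aliases the C buffer at p
// without copying, so the result is only valid while that C memory is alive
// (for an iterator's Key/Value, only until the iterator moves again).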
func slice(p unsafe.Pointer, n int) []byte {
var b []byte
pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pbyte.Data = uintptr(p)
pbyte.Len = n
pbyte.Cap = n
return b
}