diff --git a/README.md b/README.md index 74757e0..35558a3 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/siddontang/ledisdb.svg?branch=develop)](https://travis-ci.org/siddontang/ledisdb) -Ledisdb is a high-performance NoSQL database, similar to Redis, written in [Go](http://golang.org/). It supports many data structures including kv, list, hash, zset, bitmap,set. +Ledisdb is a high-performance NoSQL database, similar to Redis, written in [Go](http://golang.org/). It supports many data structures including kv, list, hash, zset, set. LedisDB now supports multiple different databases as backends. @@ -11,7 +11,7 @@ LedisDB now supports multiple different databases as backends. ## Features -+ Rich data structure: KV, List, Hash, ZSet, Bitmap, Set. ++ Rich data structure: KV, List, Hash, ZSet, Set. + Data storage is not limited by RAM. + Various backends supported: LevelDB, goleveldb, LMDB, RocksDB, BoltDB, RAM. + Supports transactions using LMDB or BotlDB. diff --git a/cmd/ledis-cli/const.go b/cmd/ledis-cli/const.go index 9fc72dd..560a78a 100644 --- a/cmd/ledis-cli/const.go +++ b/cmd/ledis-cli/const.go @@ -1,25 +1,14 @@ -//This file was generated by .tools/generate_commands.py on Mon Mar 02 2015 14:43:25 +0800 +//This file was generated by .tools/generate_commands.py on Tue Mar 03 2015 08:58:38 +0800 package main var helpCommands = [][]string{ {"APPEND", "key value", "KV"}, - {"BCOUNT", "key [start end]", "Bitmap"}, - {"BDELETE", "key", "ZSet"}, {"BEGIN", "-", "Transaction"}, - {"BEXPIRE", "key seconds", "Bitmap"}, - {"BEXPIREAT", "key timestamp", "Bitmap"}, - {"BGET", "key", "Bitmap"}, - {"BGETBIT", "key offset", "Bitmap"}, {"BITCOUNT", "key [start] [end]", "KV"}, {"BITOP", "operation destkey key [key ...]", "KV"}, {"BITPOS", "key bit [start] [end]", "KV"}, {"BLPOP", "key [key ...] 
timeout", "List"}, - {"BMSETBIT", "key offset value [offset value ...]", "Bitmap"}, - {"BOPT", "operation destkey key [key ...]", "Bitmap"}, - {"BPERSIST", "key", "Bitmap"}, {"BRPOP", "key [key ...] timeout", "List"}, - {"BSETBIT", "key offset value", "Bitmap"}, - {"BTTL", "key", "Bitmap"}, {"COMMIT", "-", "Transaction"}, {"CONFIG REWRITE", "-", "Server"}, {"DECR", "key", "KV"}, diff --git a/doc/DiffRedis.md b/doc/DiffRedis.md index a20cc1f..7722be3 100644 --- a/doc/DiffRedis.md +++ b/doc/DiffRedis.md @@ -5,10 +5,6 @@ so you can easily write your own LedisDB client based on a Redis one. Before you write a client, you must know some differences between LedisDB and Redis. -## Data Structure - -LedisDB has no Strings data type but KV and Bitmap, any some Keys and Strings commands in Redis will only affect KV data, and "bit" commands affect Bitmap. - ## Del In Redis, `del` can delete all type data, like String, Hash, List, etc, but in LedisDB, `del` can only delete KV data. To delete other type data, you will use "clear" commands. @@ -17,8 +13,7 @@ In Redis, `del` can delete all type data, like String, Hash, List, etc, but in L + Hash: `hclear`, `hmclear` + List: `lclear`, `lmclear` + Set: `sclear`, `smclear` -+ Zset: `zclear`, `zmclear` -+ Bitmap: `bclear`, `bmclear` ++ ZSet: `zclear`, `zmclear` ## Expire, Persist, and TTL @@ -29,7 +24,6 @@ The same for Del. + List: `lexpire`, `lpersist`, `lttl` + Set: `sexpire`, `spersist`, `sttl` + Zset: `zexpire`, `zpersist`, `zttl` -+ Bitmap: `bexpire`, `bpersist`, `bttl` ## ZSet @@ -47,14 +41,14 @@ Transaction API: ## Scan -LedisDB supplies `xscan`, `xrevscan`, etc, to fetch data iteratively and reverse iteratively. +LedisDB supplies `xscan`, `xhscan`, `xsscan`, `xzscan` to fetch data iteratively. 
-+ KV: `xscan`, `xrevscan` -+ Hash: `hxscan`, `hxrevscan`, `xhscan`, `xhrevscan` -+ List: `lxscan`, `lxrevscan`, `xlscan`, `xlrevscan` -+ Set: `sxscan` , `sxrevscan`, `xsscan`, `xsrevscan` -+ Zset: `zxscan`, `zxrevscan`, `xzscan`, `xzrevscan` -+ Bitmap: `bxscan`, `bxrevscan`, `xbscan`, `xbrevscan` +``` +XSCAN type cursor [MATCH match] [COUNT count] +XHSCAN key cursor [MATCH match] [COUNT count] +XSSCAN key cursor [MATCH match] [COUNT count] +XZSCAN key cursor [MATCH match] [COUNT count] +``` ## DUMP diff --git a/doc/commands.json b/doc/commands.json index 281fd07..5479969 100644 --- a/doc/commands.json +++ b/doc/commands.json @@ -1,59 +1,4 @@ { - "BCOUNT": { - "arguments": "key [start end]", - "group": "Bitmap", - "readonly": true - }, - "BDELETE": { - "arguments": "key", - "group": "ZSet", - "readonly": false - }, - "BEXPIRE": { - "arguments": "key seconds", - "group": "Bitmap", - "readonly": false - }, - "BEXPIREAT": { - "arguments": "key timestamp", - "group": "Bitmap", - "readonly": false - }, - "BGET": { - "arguments": "key", - "group": "Bitmap", - "readonly": true - }, - "BGETBIT": { - "arguments": "key offset", - "group": "Bitmap", - "readonly": true - }, - "BMSETBIT": { - "arguments": "key offset value [offset value ...]", - "group": "Bitmap", - "readonly": false - }, - "BOPT": { - "arguments": "operation destkey key [key ...]", - "group": "Bitmap", - "readonly": false - }, - "BPERSIST": { - "arguments": "key", - "group": "Bitmap", - "readonly": false - }, - "BSETBIT": { - "arguments": "key offset value", - "group": "Bitmap", - "readonly": false - }, - "BTTL": { - "arguments": "key", - "group": "Bitmap", - "readonly": true - }, "DECR": { "arguments": "key", "group": "KV", diff --git a/doc/commands.md b/doc/commands.md index 2e4bce6..c76447a 100644 --- a/doc/commands.md +++ b/doc/commands.md @@ -1,158 +1,157 @@ ## Summary -ledisdb use redis protocol called RESP(REdis Serialization Protocol), [here](http://redis.io/topics/protocol). 
+Ledisdb use redis protocol called RESP(REdis Serialization Protocol), [here](http://redis.io/topics/protocol). -ledisdb all commands return RESP format and it will use `int64` instead of `RESP integer`, `string` instead of `RESP simple string`, `bulk string` instead of `RESP bulk string`, and `array` instead of `RESP arrays` below. +Ledisdb all commands return RESP format and it will use `int64` instead of `RESP integer`, `string` instead of `RESP simple string`, `bulk string` instead of `RESP bulk string`, and `array` instead of `RESP arrays` below. -Table of Contents -================= +Most of the Ledisdb's commands are the same as Redis's, you can see the redis commands document for detailed information too. + + +**Commands List** -- [Summary](#summary) - [KV](#kv) - - [DECR key](#decr-key) - - [DECRBY key decrement](#decrby-key-decrement) - - [DEL key [key ...]](#del-key-key-) - - [EXISTS key](#exists-key) - - [GET key](#get-key) - - [GETSET key value](#getset-key-value) - - [INCR key](#incr-key) - - [INCRBY key increment](#incrby-key-increment) - - [MGET key [key ...]](#mget-key-key-) - - [MSET key value [key value ...]](#mset-key-value-key-value-) - - [SET key value](#set-key-value) - - [SETNX key value](#setnx-key-value) - - [SETEX key seconds value](#setex-key-seconds-value) - - [EXPIRE key seconds](#expire-key-seconds) - - [EXPIREAT key timestamp](#expireat-key-timestamp) - - [TTL key](#ttl-key) - - [PERSIST key](#persist-key) - - [DUMP key](#dump-key) + - [DECR key](#decr-key) + - [DECRBY key decrement](#decrby-key-decrement) + - [DEL key [key ...]](#del-key-key-) + - [EXISTS key](#exists-key) + - [GET key](#get-key) + - [GETSET key value](#getset-key-value) + - [INCR key](#incr-key) + - [INCRBY key increment](#incrby-key-increment) + - [MGET key [key ...]](#mget-key-key-) + - [MSET key value [key value ...]](#mset-key-value-key-value-) + - [SET key value](#set-key-value) + - [SETNX key value](#setnx-key-value) + - [SETEX key seconds 
value](#setex-key-seconds-value) + - [EXPIRE key seconds](#expire-key-seconds) + - [EXPIREAT key timestamp](#expireat-key-timestamp) + - [TTL key](#ttl-key) + - [PERSIST key](#persist-key) + - [DUMP key](#dump-key) + - [APPEND key value](#append-key-value) + - [GETRANGE key start end](#getrange-key-start-end) + - [SETRANGE key offset value](#setrange-key-offset-value) + - [STRLEN key](#strlen-key) + - [BITCOUNT key [start] [end]](#bitcount-key-start-end) + - [BITOP operation destkey key [key ...]](#bitop-operation-destkey-key-key-) + - [BITPOS key bit [start] [end]](#bitpos-key-bit-start-end) + - [GETBIT key offset](#getbit-key-offset) + - [SETBIT key offset value](#setbit-key-offset-value) - [Hash](#hash) - - [HDEL key field [field ...]](#hdel-key-field-field-) - - [HEXISTS key field](#hexists-key-field) - - [HGET key field](#hget-key-field) - - [HGETALL key](#hgetall-key) - - [HINCRBY key field increment](#hincrby-key-field-increment) - - [HKEYS key](#hkeys-key) - - [HLEN key](#hlen-key) - - [HMGET key field [field ...]](#hmget-key-field-field-) - - [HMSET key field value [field value ...]](#hmset-key-field-value-field-value-) - - [HSET key field value](#hset-key-field-value) - - [HVALS key](#hvals-key) - - [HCLEAR key](#hclear-key) - - [HMCLEAR key [key ...]](#hmclear-key-key) - - [HEXPIRE key seconds](#hexpire-key-seconds) - - [HEXPIREAT key timestamp](#hexpireat-key-timestamp) - - [HTTL key](#httl-key) - - [HPERSIST key](#hpersist-key) - - [HDUMP key](#hdump-key) + - [HDEL key field [field ...]](#hdel-key-field-field-) + - [HEXISTS key field](#hexists-key-field) + - [HGET key field](#hget-key-field) + - [HGETALL key](#hgetall-key) + - [HINCRBY key field increment](#hincrby-key-field-increment) + - [HKEYS key](#hkeys-key) + - [HLEN key](#hlen-key) + - [HMGET key field [field ...]](#hmget-key-field-field-) + - [HMSET key field value [field value ...]](#hmset-key-field-value-field-value-) + - [HSET key field value](#hset-key-field-value) + - [HVALS 
key](#hvals-key) + - [HCLEAR key](#hclear-key) + - [HMCLEAR key [key...]](#hmclear-key-key) + - [HEXPIRE key seconds](#hexpire-key-seconds) + - [HEXPIREAT key timestamp](#hexpireat-key-timestamp) + - [HTTL key](#httl-key) + - [HPERSIST key](#hpersist-key) + - [HDUMP key](#hdump-key) - [List](#list) - - [BLPOP key [key ...] timeout](#blpop-key-key--timeout) - - [BRPOP key [key ...] timeout](#brpop-key-key--timeout) - - [LINDEX key index](#lindex-key-index) - - [LLEN key](#llen-key) - - [LPOP key](#lpop-key) - - [LRANGE key start stop](#lrange-key-start-stop) - - [LPUSH key value [value ...]](#lpush-key-value-value-) - - [RPOP key](#rpop-keuser-content-y) - - [RPUSH key value [value ...]](#rpush-key-value-value-) - - [LCLEAR key](#lclear-key) - - [LMCLEAR key [key...]](#lmclear-key-key-) - - [LEXPIRE key seconds](#lexpire-key-seconds) - - [LEXPIREAT key timestamp](#lexpireat-key-timestamp) - - [LTTL key](#lttl-key) - - [LPERSIST key](#lpersist-key) - - [LDUMP key](#ldump-key) + - [BLPOP key [key ...] timeout](#blpop-key-key--timeout) + - [BRPOP key [key ...] 
timeout](#brpop-key-key--timeout) + - [LINDEX key index](#lindex-key-index) + - [LLEN key](#llen-key) + - [LPOP key](#lpop-key) + - [LRANGE key start stop](#lrange-key-start-stop) + - [LPUSH key value [value ...]](#lpush-key-value-value-) + - [RPOP key](#rpop-key) + - [RPUSH key value [value ...]](#rpush-key-value-value-) + - [LCLEAR key](#lclear-key) + - [LMCLEAR key [key ...]](#lmclear-key-key-) + - [LEXPIRE key seconds](#lexpire-key-seconds) + - [LEXPIREAT key timestamp](#lexpireat-key-timestamp) + - [LTTL key](#lttl-key) + - [LPERSIST key](#lpersist-key) + - [LDUMP key](#ldump-key) - [Set](#set) - - [SADD key member [member ...]](#sadd-key-member-member-) - - [SCARD key](#scard-key) - - [SDIFF key [key ...]](#sdiff-key-key-) - - [SDIFFSTORE destination key [key ...]](#sdiffstore-destination-key-key-) - - [SINTER key [key ...]](#sinter-key-key-) - - [SINTERSTORE destination key [key ...]](#sinterstore-destination-key-key-) - - [SISMEMBER key member](#sismember-key-member) - - [SMEMBERS key](#smembers-key) - - [SREM key member [member ...]](#srem-key-member-member-) - - [SUNION key [key ...]](#sunion-key-key-) - - [SUNIONSTORE destination key [key ...]](#sunionstore-destination-key-key-) - - [SCLEAR key](#sclear-key) - - [SMCLEAR key [key...]](#smclear-key-key) - - [SEXPIRE key seconds](#sexpire-key-seconds) - - [SEXPIREAT key timestamp](#sexpireat-key-timestamp) - - [STTL key](#sttl-key) - - [SPERSIST key](#spersist-key) - - [SDUMP key](#sdump-key) + - [SADD key member [member ...]](#sadd-key-member-member-) + - [SCARD key](#scard-key) + - [SDIFF key [key ...]](#sdiff-key-key-) + - [SDIFFSTORE destination key [key ...]](#sdiffstore-destination-key-key-) + - [SINTER key [key ...]](#sinter-key-key-) + - [SINTERSTORE destination key [key ...]](#sinterstore--destination-key-key-) + - [SISMEMBER key member](#sismember--key-member) + - [SMEMBERS key](#smembers-key) + - [SREM key member [member ...]](#srem--key-member-member-) + - [SUNION key [key 
...]](#sunion-key-key-) + - [SUNIONSTORE destination key [key]](#sunionstore-destination-key-key) + - [SCLEAR key](#sclear-key) + - [SMCLEAR key [key ...]](#smclear-key-key-) + - [SEXPIRE key seconds](#sexpire-key-seconds) + - [SEXPIREAT key timestamp](#sexpireat-key-timestamp) + - [STTL key](#sttl-key) + - [SPERSIST key](#spersist-key) + - [SDUMP key](#sdump-key) - [ZSet](#zset) - - [ZADD key score member [score member ...]](#zadd-key-score-member-score-member-) - - [ZCARD key](#zcard-key) - - [ZCOUNT key min max](#zcount-key-min-max) - - [ZINCRBY key increment member](#zincrby-key-increment-member) - - [ZRANGE key start stop [WITHSCORES]](#zrange-key-start-stop-withscores) - - [ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]](#zrangebyscore-key-min-max-withscores-limit-offset-count) - - [ZRANK key member](#zrank-key-member) - - [ZREM key member [member ...]](#zrem-key-member-member-) - - [ZREMRANGEBYRANK key start stop](#zremrangebyrank-key-start-stop) - - [ZREMRANGEBYSCORE key min max](#zremrangebyscore-key-min-max) - - [ZREVRANGE key start stop [WITHSCORES]](#zrevrange-key-start-stop-withscores) - - [ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count]](#zrevrangebyscore-key-max-min-withscores-limit-offset-count) - - [ZREVRANK key member](#zrevrank-key-member) - - [ZSCORE key member](#zscore-key-member) - - [ZCLEAR key](#zclear-key) - - [ZMCLEAR key [key ...]](#zmclear-key-key-) - - [ZEXPIRE key seconds](#zexpire-key-seconds) - - [ZEXPIREAT key timestamp](#zexpireat-key-timestamp) - - [ZTTL key](#zttl-key) - - [ZPERSIST key](#zpersist-key) - - [ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] -](#zunionstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax) - - [ZINTERSTORE destination numkeys key [key ...] 
[WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] -](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-summinmax) - - [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count) - - [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max) - - [ZLEXCOUNT key min max](#zlexcount-key-min-max) - - [ZDUMP key](#zdump-key) -- [Bitmap](#bitmap) - - [BGET key](#bget-key) - - [BGETBIT key offset](#bgetbit-key-offset) - - [BSETBIT key offset value](#bsetbit-key-offset-value) - - [BMSETBIT key offset value[offset value ...]](#bmsetbit-key-offset-value-offset-value-) - - [BOPT operation destkey key [key ...]](#bopt-operation-destkey-key-key-) - - [BCOUNT key [start, end]](#bcount-key-start-end) - - [BEXPIRE key seconds](#bexpire-key-seconds) - - [BEXPIREAT key timestamp](#bexpireat-key-timestamp) - - [BTTL key](#bttl-key) - - [BPERSIST key](#bpersist-key) + - [ZADD key score member [score member ...]](#zadd-key-score-member-score-member-) + - [ZCARD key](#zcard-key) + - [ZCOUNT key min max](#zcount-key-min-max) + - [ZINCRBY key increment member](#zincrby-key-increment-member) + - [ZRANGE key start stop [WITHSCORES]](#zrange-key-start-stop-withscores) + - [ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]](#zrangebyscore-key-min-max-withscores-limit-offset-count) + - [ZRANK key member](#zrank-key-member) + - [ZREM key member [member ...]](#zrem-key-member-member-) + - [ZREMRANGEBYRANK key start stop](#zremrangebyrank-key-start-stop) + - [ZREMRANGEBYSCORE key min max](#zremrangebyscore-key-min-max) + - [ZREVRANGE key start stop [WITHSCORES]](#zrevrange-key-start-stop-withscores) + - [ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count]](#zrevrangebyscore--key-max-min-withscores-limit-offset-count) + - [ZREVRANK key member](#zrevrank-key-member) + - [ZSCORE key member](#zscore-key-member) + - [ZCLEAR key](#zclear-key) + - [ZMCLEAR key [key ...]](#zmclear-key-key-) + - [ZEXPIRE key 
seconds](#zexpire-key-seconds) + - [ZEXPIREAT key timestamp](#zexpireat-key-timestamp) + - [ZTTL key](#zttl-key) + - [ZPERSIST key](#zpersist-key) + - [ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]](#zunionstore-destination-numkeys-key-key--weights-weight-weight--aggregate-sum|min|max) + - [ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]](#zinterstore-destination-numkeys-key-key--weights-weight-weight--aggregate-sum|min|max) + - [ZRANGEBYLEX key min max [LIMIT offset count]](#zrangebylex-key-min-max-limit-offset-count) + - [ZREMRANGEBYLEX key min max](#zremrangebylex-key-min-max) + - [ZLEXCOUNT key min max](#zlexcount-key-min-max) + - [ZDUMP key](#zdump-key) - [Scan](#scan) - - [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count) - - [XHSCAN key cursor [MATCH match] [COUNT count]](#xhscan-key-cursor-match-match-count-count) - - [XSSCAN key cursor [MATCH match] [COUNT count]](#xsscan-key-cursor-match-match-count-count) - - [XZSCAN key cursor [MATCH match] [COUNT count]](#xzscan-key-cursor-match-match-count-count) + - [XSCAN type cursor [MATCH match] [COUNT count]](#xscan-type-cursor-match-match-count-count) + - [XHSCAN key cursor [MATCH match] [COUNT count]](#xhscan-key-cursor-match-match-count-count) + - [XSSCAN key cursor [MATCH match] [COUNT count]](#xsscan-key-cursor-match-match-count-count) + - [XZSCAN key cursor [MATCH match] [COUNT count]](#xzscan-key-cursor-match-match-count-count) - [Replication](#replication) - - [SLAVEOF host port [RESTART] [READONLY]](#slaveof-host-port-restart-readonly) - - [FULLSYNC [NEW]](#fullsync-new) - - [SYNC logid](#sync-logid) + - [SLAVEOF host port [RESTART] [READONLY]](#slaveof-host-port-restart-readonly) + - [FULLSYNC [NEW]](#fullsync-new) + - [SYNC logid](#sync-logid) - [Server](#server) - - [PING](#ping) - - [ECHO message](#echo-message) - - [SELECT index](#select-index) - - 
[FLUSHALL](#flushall) - - [FLUSHDB](#flushdb) - - [INFO [section]](#info-section) - - [TIME](#time) - - [CONFIG REWRITE](#config-rewrite) - - [RESTORE key ttl value](#restore-key-ttl-value) - - [ROLE](#role) + - [PING](#ping) + - [ECHO message](#echo-message) + - [SELECT index](#select-index) + - [FLUSHALL](#flushall) + - [FLUSHDB](#flushdb) + - [INFO [section]](#info-section) + - [TIME](#time) + - [CONFIG REWRITE](#config-rewrite) + - [RESTORE key ttl value](#restore-key-ttl-value) + - [ROLE](#role) - [Transaction](#transaction) - - [BEGIN](#begin) - - [ROLLBACK](#rollback) - - [COMMIT](#commit) + - [BEGIN](#begin) + - [ROLLBACK](#rollback) + - [COMMIT](#commit) - [Script](#script) - - [EVAL script numkeys key [key ...] arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-) - - [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-) - - [SCRIPT LOAD script](#script-load-script) - - [SCRIPT EXISTS script [script ...]](#script-exists-script-script-) - - [SCRIPT FLUSH](#script-flush) + - [EVAL script numkeys key [key ...] arg [arg ...]](#eval-script-numkeys-key-key--arg-arg-) + - [EVALSHA sha1 numkeys key [key ...] arg [arg ...]](#evalsha-sha1-numkeys-key-key--arg-arg-) + - [SCRIPT LOAD script](#script-load-script) + - [SCRIPT EXISTS script [script ...]](#script-exists-script-script-) + - [SCRIPT FLUSH](#script-flush) + + ## KV @@ -524,6 +523,25 @@ ledis>DUMP mykey "\x00\xc0\n\x06\x00\xf8r?\xc5\xfb\xfb_(" ``` +### APPEND key value + +### GETRANGE key start end + +### SETRANGE key offset value + +### STRLEN key + +### BITCOUNT key [start] [end] + +### BITOP operation destkey key [key ...] + +### BITPOS key bit [start] [end] + +### GETBIT key offset + +### SETBIT key offset value + + ## Hash ### HDEL key field [field ...] @@ -2332,164 +2350,6 @@ ledis> ZLEXCOUNT myzset - [c See [DUMP](#dump-key) for more information. -## Bitmap - -### BGET key - -Returns the whole binary data stored at `key`. 
- -**Return value** - -bulk: the raw value of key, or nil when key does not exist. - -**Examples** - -``` -ledis> BMSETBIT flag 0 1 5 1 6 1 -(integer) 3 -ledis> BGET flag -a -``` - - -### BGETBIT key offset - -Returns the bit value at `offset` in the string value stored at `key`. -When *offset* beyond the data length, ot the target data is not exist, the bit value will be 0 always. - -**Return value** - -int64 : the bit value stored at offset. - -**Examples** - -``` -ledis> BSETBIT flag 1024 1 -(integer) 0 -ledis> BGETBIT flag 0 -(integer) 0 -ledis> BGETBIT flag 1024 -(integer) 1 -ledis> BGETBIT flag 65535 -(integer) 0 -``` - - -### BSETBIT key offset value - -Sets or clear the bit at `offset` in the binary data sotred at `key`. -The bit is either set or cleared depending on `value`, which can be either `0` or `1`. -The *offset* argument is required to be qual to 0, and smaller than -2^23 (this means bitmap limits to 8MB). - -**Return value** - -int64 : the original bit value stored at offset. - -**Examples** - -``` -ledis> BSETBIT flag 0 1 -(integer) 0 -ledis> BSETBIT flag 0 0 -(integer) 1 -ledis> BGETBIT flag 0 99 -ERR invalid command param -``` - -### BMSETBIT key offset value [offset value ...] -Sets the given *offset* to their respective values. - -**Return value** - -int64 : The number of input *offset* - -**Examples** - -``` -ledis> BMSETBIT flag 0 1 1 1 2 0 3 1 -(integer) 4 -ledis> BCOUNT flag -(integer) 3 -``` - - -### BOPT operation destkey key [key ...] -Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. - -**Return value** - -Int64: -The size of the string stored in the destination key, that is equal to the size of the longest input string. 
-**Examples** - -``` -ledis> BMSETBIT a 0 1 2 1 -(integer) 2 -ledis> BMSETBIT b 1 1 -(integer) 1 -ledis> BOPT AND res a b -(integer) 3 -ledis> BCOUNT res -(integer) 0 -ledis> BOPT OR res2 a b -(integer) 3 -ledis> BCOUNT res2 -(integer) 3 -ledis> BOPT XOR res3 a b -(integer) 3 -ledis> BCOUNT res3 -(integer) 3 -``` - -### BCOUNT key [start end] - -Count the number of set bits in a bitmap. - -**Return value** - -int64 : The number of bits set to 1. - -**Examples** - -``` -ledis> BMSETBIT flag 0 1 5 1 6 1 -(integer) 3 -ledis> BGET flag -a -ledis> BCOUNT flag -(integer) 3 -ledis> BCOUNT flag 0 0s -(integer) 1 -ledis> BCOUNT flag 0 4 -(integer) 1 -ledis> BCOUNT flag 0 5 -(integer) 2 -ledis> BCOUNT flag 5 6 -(integer) 2 -``` - - -### BEXPIRE key seconds - -(refer to [EXPIRE](#expire-key-seconds) api for other types) - - -### BEXPIREAT key timestamp - -(refer to [EXPIREAT](#expireat-key-timestamp) api for other types) - - -### BTTL key - -(refer to [TTL](#ttl-key) api for other types) - - -### BPERSIST key - -(refer to [PERSIST](#persist-key) api for other types) - - ## Scan ### XSCAN type cursor [MATCH match] [COUNT count] diff --git a/ledis/const.go b/ledis/const.go index 9bb0b70..7305b79 100644 --- a/ledis/const.go +++ b/ledis/const.go @@ -44,19 +44,19 @@ const ( // for backend store const ( - NoneType byte = 0 - KVType byte = 1 - HashType byte = 2 - HSizeType byte = 3 - ListType byte = 4 - LMetaType byte = 5 - ZSetType byte = 6 - ZSizeType byte = 7 - ZScoreType byte = 8 - BitType byte = 9 - BitMetaType byte = 10 - SetType byte = 11 - SSizeType byte = 12 + NoneType byte = 0 + KVType byte = 1 + HashType byte = 2 + HSizeType byte = 3 + ListType byte = 4 + LMetaType byte = 5 + ZSetType byte = 6 + ZSizeType byte = 7 + ZScoreType byte = 8 + // BitType byte = 9 + // BitMetaType byte = 10 + SetType byte = 11 + SSizeType byte = 12 maxDataType byte = 100 @@ -73,16 +73,16 @@ const ( var ( TypeName = map[byte]string{ - KVType: "kv", - HashType: "hash", - HSizeType: "hsize", - 
ListType: "list", - LMetaType: "lmeta", - ZSetType: "zset", - ZSizeType: "zsize", - ZScoreType: "zscore", - BitType: "bit", - BitMetaType: "bitmeta", + KVType: "kv", + HashType: "hash", + HSizeType: "hsize", + ListType: "list", + LMetaType: "lmeta", + ZSetType: "zset", + ZSizeType: "zsize", + ZScoreType: "zscore", + // BitType: "bit", + // BitMetaType: "bitmeta", SetType: "set", SSizeType: "ssize", ExpTimeType: "exptime", diff --git a/ledis/event.go b/ledis/event.go index 2a3b54a..b9a4833 100644 --- a/ledis/event.go +++ b/ledis/event.go @@ -81,20 +81,20 @@ func formatEventKey(buf []byte, k []byte) ([]byte, error) { buf = append(buf, ' ') buf = strconv.AppendInt(buf, score, 10) } - case BitType: - if key, seq, err := db.bDecodeBinKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendUint(buf, uint64(seq), 10) - } - case BitMetaType: - if key, err := db.bDecodeMetaKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } + // case BitType: + // if key, seq, err := db.bDecodeBinKey(k); err != nil { + // return nil, err + // } else { + // buf = strconv.AppendQuote(buf, hack.String(key)) + // buf = append(buf, ' ') + // buf = strconv.AppendUint(buf, uint64(seq), 10) + // } + // case BitMetaType: + // if key, err := db.bDecodeMetaKey(k); err != nil { + // return nil, err + // } else { + // buf = strconv.AppendQuote(buf, hack.String(key)) + // } case SetType: if key, member, err := db.sDecodeSetKey(k); err != nil { return nil, err diff --git a/ledis/ledis.go b/ledis/ledis.go index c2d7f36..d5b01d8 100644 --- a/ledis/ledis.go +++ b/ledis/ledis.go @@ -174,7 +174,7 @@ func (l *Ledis) checkTTL() { c.register(ListType, db.listBatch, db.lDelete) c.register(HashType, db.hashBatch, db.hDelete) c.register(ZSetType, db.zsetBatch, db.zDelete) - c.register(BitType, db.binBatch, db.bDelete) + // c.register(BitType, db.binBatch, db.bDelete) 
c.register(SetType, db.setBatch, db.sDelete) l.tcs[i] = c diff --git a/ledis/ledis_db.go b/ledis/ledis_db.go index 4e2b490..2c755e1 100644 --- a/ledis/ledis_db.go +++ b/ledis/ledis_db.go @@ -36,8 +36,8 @@ type DB struct { listBatch *batch hashBatch *batch zsetBatch *batch - binBatch *batch - setBatch *batch + // binBatch *batch + setBatch *batch status uint8 @@ -60,7 +60,7 @@ func (l *Ledis) newDB(index uint8) *DB { d.listBatch = d.newBatch() d.hashBatch = d.newBatch() d.zsetBatch = d.newBatch() - d.binBatch = d.newBatch() + // d.binBatch = d.newBatch() d.setBatch = d.newBatch() d.lbkeys = newLBlockKeys() @@ -86,7 +86,6 @@ func (db *DB) FlushAll() (drop int64, err error) { db.lFlush, db.hFlush, db.zFlush, - db.bFlush, db.sFlush} for _, flush := range all { @@ -117,9 +116,9 @@ func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { case ZSetType: deleteFunc = db.zDelete metaDataType = ZSizeType - case BitType: - deleteFunc = db.bDelete - metaDataType = BitMetaType + // case BitType: + // deleteFunc = db.bDelete + // metaDataType = BitMetaType case SetType: deleteFunc = db.sDelete metaDataType = SSizeType diff --git a/ledis/multi.go b/ledis/multi.go index 29abe34..0b02417 100644 --- a/ledis/multi.go +++ b/ledis/multi.go @@ -44,7 +44,7 @@ func (db *DB) Multi() (*Multi, error) { m.DB.listBatch = m.newBatch() m.DB.hashBatch = m.newBatch() m.DB.zsetBatch = m.newBatch() - m.DB.binBatch = m.newBatch() + // m.DB.binBatch = m.newBatch() m.DB.setBatch = m.newBatch() m.DB.lbkeys = db.lbkeys diff --git a/ledis/scan.go b/ledis/scan.go index c465168..466f1ec 100644 --- a/ledis/scan.go +++ b/ledis/scan.go @@ -162,8 +162,8 @@ func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) { return db.zEncodeSizeKey(key), nil case SSizeType: return db.sEncodeSizeKey(key), nil - case BitMetaType: - return db.bEncodeMetaKey(key), nil + // case BitMetaType: + // return db.bEncodeMetaKey(key), nil default: return nil, errDataType } diff --git 
a/ledis/t_bit.go b/ledis/t_bit.go index d89f15b..ebb099c 100644 --- a/ledis/t_bit.go +++ b/ledis/t_bit.go @@ -1,931 +1,931 @@ package ledis -import ( - "encoding/binary" - "errors" - "github.com/siddontang/go/log" - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/store" - "sort" - "time" -) - -/* - We will not maintain bitmap anymore, and will add bit operations for kv type later. - Use your own risk. -*/ - -const ( - OPand uint8 = iota + 1 - OPor - OPxor - OPnot -) - -type BitPair struct { - Pos int32 - Val uint8 -} - -type segBitInfo struct { - Seq uint32 - Off uint32 - Val uint8 -} - -type segBitInfoArray []segBitInfo - -const ( - // byte - segByteWidth uint32 = 9 - segByteSize uint32 = 1 << segByteWidth - - // bit - segBitWidth uint32 = segByteWidth + 3 - segBitSize uint32 = segByteSize << 3 - - maxByteSize uint32 = 8 << 20 - maxSegCount uint32 = maxByteSize / segByteSize - - minSeq uint32 = 0 - maxSeq uint32 = uint32((maxByteSize << 3) - 1) -) - -var fillBits = [...]uint8{1, 3, 7, 15, 31, 63, 127, 255} - -var emptySegment []byte = make([]byte, segByteSize, segByteSize) - -var fillSegment []byte = func() []byte { - data := make([]byte, segByteSize, segByteSize) - for i := uint32(0); i < segByteSize; i++ { - data[i] = 0xff - } - return data -}() - -var errBinKey = errors.New("invalid bin key") -var errOffset = errors.New("invalid offset") -var errDuplicatePos = errors.New("duplicate bit pos") - -func getBit(sz []byte, offset uint32) uint8 { - index := offset >> 3 - if index >= uint32(len(sz)) { - return 0 // error("overflow") - } - - offset -= index << 3 - return sz[index] >> offset & 1 -} - -func setBit(sz []byte, offset uint32, val uint8) bool { - if val != 1 && val != 0 { - return false // error("invalid val") - } - - index := offset >> 3 - if index >= uint32(len(sz)) { - return false // error("overflow") - } - - offset -= index << 3 - if sz[index]>>offset&1 != val { - sz[index] ^= (1 << offset) - } - return true -} - -func (datas 
segBitInfoArray) Len() int { - return len(datas) -} - -func (datas segBitInfoArray) Less(i, j int) bool { - res := (datas)[i].Seq < (datas)[j].Seq - if !res && (datas)[i].Seq == (datas)[j].Seq { - res = (datas)[i].Off < (datas)[j].Off - } - return res -} - -func (datas segBitInfoArray) Swap(i, j int) { - datas[i], datas[j] = datas[j], datas[i] -} - -func (db *DB) bEncodeMetaKey(key []byte) []byte { - mk := make([]byte, len(key)+2) - mk[0] = db.index - mk[1] = BitMetaType - - copy(mk[2:], key) - return mk -} - -func (db *DB) bDecodeMetaKey(bkey []byte) ([]byte, error) { - if len(bkey) < 2 || bkey[0] != db.index || bkey[1] != BitMetaType { - return nil, errBinKey - } - - return bkey[2:], nil -} - -func (db *DB) bEncodeBinKey(key []byte, seq uint32) []byte { - bk := make([]byte, len(key)+8) - - pos := 0 - bk[pos] = db.index - pos++ - bk[pos] = BitType - pos++ - - binary.BigEndian.PutUint16(bk[pos:], uint16(len(key))) - pos += 2 - - copy(bk[pos:], key) - pos += len(key) - - binary.BigEndian.PutUint32(bk[pos:], seq) - - return bk -} - -func (db *DB) bDecodeBinKey(bkey []byte) (key []byte, seq uint32, err error) { - if len(bkey) < 8 || bkey[0] != db.index { - err = errBinKey - return - } - - keyLen := binary.BigEndian.Uint16(bkey[2:4]) - if int(keyLen+8) != len(bkey) { - err = errBinKey - return - } - - key = bkey[4 : 4+keyLen] - seq = uint32(binary.BigEndian.Uint32(bkey[4+keyLen:])) - return -} - -func (db *DB) bCapByteSize(seq uint32, off uint32) uint32 { - var offByteSize uint32 = (off >> 3) + 1 - if offByteSize > segByteSize { - offByteSize = segByteSize - } - - return seq<= 0 { - offset += int32((uint32(tailSeq)<> segBitWidth - off &= (segBitSize - 1) - return -} - -func (db *DB) bGetMeta(key []byte) (tailSeq int32, tailOff int32, err error) { - var v []byte - - mk := db.bEncodeMetaKey(key) - v, err = db.bucket.Get(mk) - if err != nil { - return - } - - if v != nil { - tailSeq = int32(binary.LittleEndian.Uint32(v[0:4])) - tailOff = 
int32(binary.LittleEndian.Uint32(v[4:8])) - } else { - tailSeq = -1 - tailOff = -1 - } - return -} - -func (db *DB) bSetMeta(t *batch, key []byte, tailSeq uint32, tailOff uint32) { - ek := db.bEncodeMetaKey(key) - - buf := make([]byte, 8) - binary.LittleEndian.PutUint32(buf[0:4], tailSeq) - binary.LittleEndian.PutUint32(buf[4:8], tailOff) - - t.Put(ek, buf) - return -} - -func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq uint32, tailOff uint32, err error) { - var tseq, toff int32 - var update bool = false - - if tseq, toff, err = db.bGetMeta(key); err != nil { - return - } else if tseq < 0 { - update = true - } else { - tailSeq = uint32(num.MaxInt32(tseq, 0)) - tailOff = uint32(num.MaxInt32(toff, 0)) - update = (seq > tailSeq || (seq == tailSeq && off > tailOff)) - } - - if update { - db.bSetMeta(t, key, seq, off) - tailSeq = seq - tailOff = off - } - return -} - -func (db *DB) bDelete(t *batch, key []byte) (drop int64) { - mk := db.bEncodeMetaKey(key) - t.Delete(mk) - - minKey := db.bEncodeBinKey(key, minSeq) - maxKey := db.bEncodeBinKey(key, maxSeq) - it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) - for ; it.Valid(); it.Next() { - t.Delete(it.RawKey()) - drop++ - } - it.Close() - - return drop -} - -func (db *DB) bGetSegment(key []byte, seq uint32) ([]byte, []byte, error) { - bk := db.bEncodeBinKey(key, seq) - segment, err := db.bucket.Get(bk) - if err != nil { - return bk, nil, err - } - return bk, segment, nil -} - -func (db *DB) bAllocateSegment(key []byte, seq uint32) ([]byte, []byte, error) { - bk, segment, err := db.bGetSegment(key, seq) - if err == nil && segment == nil { - segment = make([]byte, segByteSize, segByteSize) - } - return bk, segment, err -} - -func (db *DB) bIterator(key []byte) *store.RangeLimitIterator { - sk := db.bEncodeBinKey(key, minSeq) - ek := db.bEncodeBinKey(key, maxSeq) - return db.bucket.RangeIterator(sk, ek, store.RangeClose) -} - -func (db *DB) bSegAnd(a []byte, b []byte, res 
*[]byte) { - if a == nil || b == nil { - *res = nil - return - } - - data := *res - if data == nil { - data = make([]byte, segByteSize, segByteSize) - *res = data - } - - for i := uint32(0); i < segByteSize; i++ { - data[i] = a[i] & b[i] - } - return -} - -func (db *DB) bSegOr(a []byte, b []byte, res *[]byte) { - if a == nil || b == nil { - if a == nil && b == nil { - *res = nil - } else if a == nil { - *res = b - } else { - *res = a - } - return - } - - data := *res - if data == nil { - data = make([]byte, segByteSize, segByteSize) - *res = data - } - - for i := uint32(0); i < segByteSize; i++ { - data[i] = a[i] | b[i] - } - return -} - -func (db *DB) bSegXor(a []byte, b []byte, res *[]byte) { - if a == nil && b == nil { - *res = fillSegment - return - } - - if a == nil { - a = emptySegment - } - - if b == nil { - b = emptySegment - } - - data := *res - if data == nil { - data = make([]byte, segByteSize, segByteSize) - *res = data - } - - for i := uint32(0); i < segByteSize; i++ { - data[i] = a[i] ^ b[i] - } - - return -} - -func (db *DB) bExpireAt(key []byte, when int64) (int64, error) { - t := db.binBatch - t.Lock() - defer t.Unlock() - - if seq, _, err := db.bGetMeta(key); err != nil || seq < 0 { - return 0, err - } else { - db.expireAt(t, BitType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - } - return 1, nil -} - -func (db *DB) bCountByte(val byte, soff uint32, eoff uint32) int32 { - if soff > eoff { - soff, eoff = eoff, soff - } - - mask := uint8(0) - if soff > 0 { - mask |= fillBits[soff-1] - } - if eoff < 7 { - mask |= (fillBits[7] ^ fillBits[eoff]) - } - mask = fillBits[7] ^ mask - - return bitsInByte[val&mask] -} - -func (db *DB) bCountSeg(key []byte, seq uint32, soff uint32, eoff uint32) (cnt int32, err error) { - if soff >= segBitSize || soff < 0 || - eoff >= segBitSize || eoff < 0 { - return - } - - var segment []byte - if _, segment, err = db.bGetSegment(key, seq); err != nil { - return - } - - if segment == nil { - return - 
} - - if soff > eoff { - soff, eoff = eoff, soff - } - - headIdx := int(soff >> 3) - endIdx := int(eoff >> 3) - sByteOff := soff - ((soff >> 3) << 3) - eByteOff := eoff - ((eoff >> 3) << 3) - - if headIdx == endIdx { - cnt = db.bCountByte(segment[headIdx], sByteOff, eByteOff) - } else { - cnt = db.bCountByte(segment[headIdx], sByteOff, 7) + - db.bCountByte(segment[endIdx], 0, eByteOff) - } - - // sum up following bytes - for idx, end := headIdx+1, endIdx-1; idx <= end; idx += 1 { - cnt += bitsInByte[segment[idx]] - if idx == end { - break - } - } - - return -} - -func (db *DB) BGet(key []byte) (data []byte, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - var ts, to int32 - if ts, to, err = db.bGetMeta(key); err != nil || ts < 0 { - return - } - - var tailSeq, tailOff = uint32(ts), uint32(to) - var capByteSize uint32 = db.bCapByteSize(tailSeq, tailOff) - data = make([]byte, capByteSize, capByteSize) - - minKey := db.bEncodeBinKey(key, minSeq) - maxKey := db.bEncodeBinKey(key, tailSeq) - it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) - - var seq, s, e uint32 - for ; it.Valid(); it.Next() { - if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { - data = nil - break - } - - s = seq << segByteWidth - e = num.MinUint32(s+segByteSize, capByteSize) - copy(data[s:e], it.RawValue()) - } - it.Close() - - return -} - -func (db *DB) BDelete(key []byte) (drop int64, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - t := db.binBatch - t.Lock() - defer t.Unlock() - - drop = db.bDelete(t, key) - db.rmExpire(t, BitType, key) - - err = t.Commit() - return -} - -func (db *DB) BSetBit(key []byte, offset int32, val uint8) (ori uint8, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") 
- - if err = checkKeySize(key); err != nil { - return - } - - // todo : check offset - var seq, off uint32 - if seq, off, err = db.bParseOffset(key, offset); err != nil { - return 0, err - } - - var bk, segment []byte - if bk, segment, err = db.bAllocateSegment(key, seq); err != nil { - return 0, err - } - - if segment != nil { - ori = getBit(segment, off) - if setBit(segment, off, val) { - t := db.binBatch - t.Lock() - defer t.Unlock() - - t.Put(bk, segment) - if _, _, e := db.bUpdateMeta(t, key, seq, off); e != nil { - err = e - return - } - - err = t.Commit() - } - } - - return -} - -func (db *DB) BMSetBit(key []byte, args ...BitPair) (place int64, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if err = checkKeySize(key); err != nil { - return - } - - // (ps : so as to aviod wasting memory copy while calling db.Get() and batch.Put(), - // here we sequence the params by pos, so that we can merge the execution of - // diff pos setting which targets on the same segment respectively. 
) - - // #1 : sequence request data - var argCnt = len(args) - var bitInfos segBitInfoArray = make(segBitInfoArray, argCnt) - var seq, off uint32 - - for i, info := range args { - if seq, off, err = db.bParseOffset(key, info.Pos); err != nil { - return - } - - bitInfos[i].Seq = seq - bitInfos[i].Off = off - bitInfos[i].Val = info.Val - } - - sort.Sort(bitInfos) - - for i := 1; i < argCnt; i++ { - if bitInfos[i].Seq == bitInfos[i-1].Seq && bitInfos[i].Off == bitInfos[i-1].Off { - return 0, errDuplicatePos - } - } - - // #2 : execute bit set in order - t := db.binBatch - t.Lock() - defer t.Unlock() - - var curBinKey, curSeg []byte - var curSeq, maxSeq, maxOff uint32 - - for _, info := range bitInfos { - if curSeg != nil && info.Seq != curSeq { - t.Put(curBinKey, curSeg) - curSeg = nil - } - - if curSeg == nil { - curSeq = info.Seq - if curBinKey, curSeg, err = db.bAllocateSegment(key, info.Seq); err != nil { - return - } - - if curSeg == nil { - continue - } - } - - if setBit(curSeg, info.Off, info.Val) { - maxSeq = info.Seq - maxOff = info.Off - place++ - } - } - - if curSeg != nil { - t.Put(curBinKey, curSeg) - } - - // finally, update meta - if place > 0 { - if _, _, err = db.bUpdateMeta(t, key, maxSeq, maxOff); err != nil { - return - } - - err = t.Commit() - } - - return -} - -func (db *DB) BGetBit(key []byte, offset int32) (uint8, error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - - if seq, off, err := db.bParseOffset(key, offset); err != nil { - return 0, err - } else { - _, segment, err := db.bGetSegment(key, seq) - if err != nil { - return 0, err - } - - if segment == nil { - return 0, nil - } else { - return getBit(segment, off), nil - } - } -} - -// func (db *DB) BGetRange(key []byte, start int32, end int32) ([]byte, error) { -// section := make([]byte) +// import ( +// "encoding/binary" +// "errors" +// "github.com/siddontang/go/log" +// "github.com/siddontang/go/num" +// 
"github.com/siddontang/ledisdb/store" +// "sort" +// "time" +// ) + +// /* +// We will not maintain bitmap anymore, and will add bit operations for kv type later. +// Use your own risk. +// */ + +// const ( +// OPand uint8 = iota + 1 +// OPor +// OPxor +// OPnot +// ) + +// type BitPair struct { +// Pos int32 +// Val uint8 +// } + +// type segBitInfo struct { +// Seq uint32 +// Off uint32 +// Val uint8 +// } + +// type segBitInfoArray []segBitInfo + +// const ( +// // byte +// segByteWidth uint32 = 9 +// segByteSize uint32 = 1 << segByteWidth + +// // bit +// segBitWidth uint32 = segByteWidth + 3 +// segBitSize uint32 = segByteSize << 3 + +// maxByteSize uint32 = 8 << 20 +// maxSegCount uint32 = maxByteSize / segByteSize + +// minSeq uint32 = 0 +// maxSeq uint32 = uint32((maxByteSize << 3) - 1) +// ) + +// var fillBits = [...]uint8{1, 3, 7, 15, 31, 63, 127, 255} + +// var emptySegment []byte = make([]byte, segByteSize, segByteSize) + +// var fillSegment []byte = func() []byte { +// data := make([]byte, segByteSize, segByteSize) +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = 0xff +// } +// return data +// }() + +// var errBinKey = errors.New("invalid bin key") +// var errOffset = errors.New("invalid offset") +// var errDuplicatePos = errors.New("duplicate bit pos") + +// func getBit(sz []byte, offset uint32) uint8 { +// index := offset >> 3 +// if index >= uint32(len(sz)) { +// return 0 // error("overflow") +// } + +// offset -= index << 3 +// return sz[index] >> offset & 1 +// } + +// func setBit(sz []byte, offset uint32, val uint8) bool { +// if val != 1 && val != 0 { +// return false // error("invalid val") +// } + +// index := offset >> 3 +// if index >= uint32(len(sz)) { +// return false // error("overflow") +// } + +// offset -= index << 3 +// if sz[index]>>offset&1 != val { +// sz[index] ^= (1 << offset) +// } +// return true +// } + +// func (datas segBitInfoArray) Len() int { +// return len(datas) +// } + +// func (datas segBitInfoArray) 
Less(i, j int) bool { +// res := (datas)[i].Seq < (datas)[j].Seq +// if !res && (datas)[i].Seq == (datas)[j].Seq { +// res = (datas)[i].Off < (datas)[j].Off +// } +// return res +// } + +// func (datas segBitInfoArray) Swap(i, j int) { +// datas[i], datas[j] = datas[j], datas[i] +// } + +// func (db *DB) bEncodeMetaKey(key []byte) []byte { +// mk := make([]byte, len(key)+2) +// mk[0] = db.index +// mk[1] = BitMetaType + +// copy(mk[2:], key) +// return mk +// } + +// func (db *DB) bDecodeMetaKey(bkey []byte) ([]byte, error) { +// if len(bkey) < 2 || bkey[0] != db.index || bkey[1] != BitMetaType { +// return nil, errBinKey +// } + +// return bkey[2:], nil +// } + +// func (db *DB) bEncodeBinKey(key []byte, seq uint32) []byte { +// bk := make([]byte, len(key)+8) + +// pos := 0 +// bk[pos] = db.index +// pos++ +// bk[pos] = BitType +// pos++ + +// binary.BigEndian.PutUint16(bk[pos:], uint16(len(key))) +// pos += 2 + +// copy(bk[pos:], key) +// pos += len(key) + +// binary.BigEndian.PutUint32(bk[pos:], seq) + +// return bk +// } + +// func (db *DB) bDecodeBinKey(bkey []byte) (key []byte, seq uint32, err error) { +// if len(bkey) < 8 || bkey[0] != db.index { +// err = errBinKey +// return +// } + +// keyLen := binary.BigEndian.Uint16(bkey[2:4]) +// if int(keyLen+8) != len(bkey) { +// err = errBinKey +// return +// } + +// key = bkey[4 : 4+keyLen] +// seq = uint32(binary.BigEndian.Uint32(bkey[4+keyLen:])) +// return +// } + +// func (db *DB) bCapByteSize(seq uint32, off uint32) uint32 { +// var offByteSize uint32 = (off >> 3) + 1 +// if offByteSize > segByteSize { +// offByteSize = segByteSize +// } + +// return seq<= 0 { +// offset += int32((uint32(tailSeq)<> segBitWidth +// off &= (segBitSize - 1) +// return +// } + +// func (db *DB) bGetMeta(key []byte) (tailSeq int32, tailOff int32, err error) { +// var v []byte + +// mk := db.bEncodeMetaKey(key) +// v, err = db.bucket.Get(mk) +// if err != nil { +// return +// } + +// if v != nil { +// tailSeq = 
int32(binary.LittleEndian.Uint32(v[0:4])) +// tailOff = int32(binary.LittleEndian.Uint32(v[4:8])) +// } else { +// tailSeq = -1 +// tailOff = -1 +// } +// return +// } + +// func (db *DB) bSetMeta(t *batch, key []byte, tailSeq uint32, tailOff uint32) { +// ek := db.bEncodeMetaKey(key) + +// buf := make([]byte, 8) +// binary.LittleEndian.PutUint32(buf[0:4], tailSeq) +// binary.LittleEndian.PutUint32(buf[4:8], tailOff) + +// t.Put(ek, buf) +// return +// } + +// func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq uint32, tailOff uint32, err error) { +// var tseq, toff int32 +// var update bool = false + +// if tseq, toff, err = db.bGetMeta(key); err != nil { +// return +// } else if tseq < 0 { +// update = true +// } else { +// tailSeq = uint32(num.MaxInt32(tseq, 0)) +// tailOff = uint32(num.MaxInt32(toff, 0)) +// update = (seq > tailSeq || (seq == tailSeq && off > tailOff)) +// } + +// if update { +// db.bSetMeta(t, key, seq, off) +// tailSeq = seq +// tailOff = off +// } +// return +// } + +// func (db *DB) bDelete(t *batch, key []byte) (drop int64) { +// mk := db.bEncodeMetaKey(key) +// t.Delete(mk) + +// minKey := db.bEncodeBinKey(key, minSeq) +// maxKey := db.bEncodeBinKey(key, maxSeq) +// it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose) +// for ; it.Valid(); it.Next() { +// t.Delete(it.RawKey()) +// drop++ +// } +// it.Close() + +// return drop +// } + +// func (db *DB) bGetSegment(key []byte, seq uint32) ([]byte, []byte, error) { +// bk := db.bEncodeBinKey(key, seq) +// segment, err := db.bucket.Get(bk) +// if err != nil { +// return bk, nil, err +// } +// return bk, segment, nil +// } + +// func (db *DB) bAllocateSegment(key []byte, seq uint32) ([]byte, []byte, error) { +// bk, segment, err := db.bGetSegment(key, seq) +// if err == nil && segment == nil { +// segment = make([]byte, segByteSize, segByteSize) +// } +// return bk, segment, err +// } + +// func (db *DB) bIterator(key []byte) *store.RangeLimitIterator { 
+// sk := db.bEncodeBinKey(key, minSeq) +// ek := db.bEncodeBinKey(key, maxSeq) +// return db.bucket.RangeIterator(sk, ek, store.RangeClose) +// } + +// func (db *DB) bSegAnd(a []byte, b []byte, res *[]byte) { +// if a == nil || b == nil { +// *res = nil +// return +// } + +// data := *res +// if data == nil { +// data = make([]byte, segByteSize, segByteSize) +// *res = data +// } + +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = a[i] & b[i] +// } +// return +// } + +// func (db *DB) bSegOr(a []byte, b []byte, res *[]byte) { +// if a == nil || b == nil { +// if a == nil && b == nil { +// *res = nil +// } else if a == nil { +// *res = b +// } else { +// *res = a +// } +// return +// } + +// data := *res +// if data == nil { +// data = make([]byte, segByteSize, segByteSize) +// *res = data +// } + +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = a[i] | b[i] +// } +// return +// } + +// func (db *DB) bSegXor(a []byte, b []byte, res *[]byte) { +// if a == nil && b == nil { +// *res = fillSegment +// return +// } + +// if a == nil { +// a = emptySegment +// } + +// if b == nil { +// b = emptySegment +// } + +// data := *res +// if data == nil { +// data = make([]byte, segByteSize, segByteSize) +// *res = data +// } + +// for i := uint32(0); i < segByteSize; i++ { +// data[i] = a[i] ^ b[i] +// } // return // } -func (db *DB) BCount(key []byte, start int32, end int32) (cnt int32, err error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") +// func (db *DB) bExpireAt(key []byte, when int64) (int64, error) { +// t := db.binBatch +// t.Lock() +// defer t.Unlock() - var sseq, soff uint32 - if sseq, soff, err = db.bParseOffset(key, start); err != nil { - return - } +// if seq, _, err := db.bGetMeta(key); err != nil || seq < 0 { +// return 0, err +// } else { +// db.expireAt(t, BitType, key, when) +// if err := t.Commit(); err != nil { +// return 0, err +// } +// } +// return 1, nil +// } - var eseq, eoff 
uint32 - if eseq, eoff, err = db.bParseOffset(key, end); err != nil { - return - } +// func (db *DB) bCountByte(val byte, soff uint32, eoff uint32) int32 { +// if soff > eoff { +// soff, eoff = eoff, soff +// } - if sseq > eseq || (sseq == eseq && soff > eoff) { - sseq, eseq = eseq, sseq - soff, eoff = eoff, soff - } +// mask := uint8(0) +// if soff > 0 { +// mask |= fillBits[soff-1] +// } +// if eoff < 7 { +// mask |= (fillBits[7] ^ fillBits[eoff]) +// } +// mask = fillBits[7] ^ mask - var segCnt int32 - if eseq == sseq { - if segCnt, err = db.bCountSeg(key, sseq, soff, eoff); err != nil { - return 0, err - } +// return bitsInByte[val&mask] +// } - cnt = segCnt +// func (db *DB) bCountSeg(key []byte, seq uint32, soff uint32, eoff uint32) (cnt int32, err error) { +// if soff >= segBitSize || soff < 0 || +// eoff >= segBitSize || eoff < 0 { +// return +// } - } else { - if segCnt, err = db.bCountSeg(key, sseq, soff, segBitSize-1); err != nil { - return 0, err - } else { - cnt += segCnt - } +// var segment []byte +// if _, segment, err = db.bGetSegment(key, seq); err != nil { +// return +// } - if segCnt, err = db.bCountSeg(key, eseq, 0, eoff); err != nil { - return 0, err - } else { - cnt += segCnt - } - } +// if segment == nil { +// return +// } - // middle segs - var segment []byte - skey := db.bEncodeBinKey(key, sseq) - ekey := db.bEncodeBinKey(key, eseq) +// if soff > eoff { +// soff, eoff = eoff, soff +// } - it := db.bucket.RangeIterator(skey, ekey, store.RangeOpen) - for ; it.Valid(); it.Next() { - segment = it.RawValue() - for _, bt := range segment { - cnt += bitsInByte[bt] - } - } - it.Close() +// headIdx := int(soff >> 3) +// endIdx := int(eoff >> 3) +// sByteOff := soff - ((soff >> 3) << 3) +// eByteOff := eoff - ((eoff >> 3) << 3) - return -} +// if headIdx == endIdx { +// cnt = db.bCountByte(segment[headIdx], sByteOff, eByteOff) +// } else { +// cnt = db.bCountByte(segment[headIdx], sByteOff, 7) + +// db.bCountByte(segment[endIdx], 0, eByteOff) +// } 
-func (db *DB) BTail(key []byte) (int32, error) { - log.Error("bitmap type will be deprecated later, please use bit operations in kv type") +// // sum up following bytes +// for idx, end := headIdx+1, endIdx-1; idx <= end; idx += 1 { +// cnt += bitsInByte[segment[idx]] +// if idx == end { +// break +// } +// } - // effective length of data, the highest bit-pos set in history - tailSeq, tailOff, err := db.bGetMeta(key) - if err != nil { - return 0, err - } +// return +// } - tail := int32(-1) - if tailSeq >= 0 { - tail = int32(uint32(tailSeq)< maxDstSeq || (seq == maxDstSeq && off > maxDstOff) { - maxDstSeq = seq - maxDstOff = off - } - } +// t := db.binBatch +// t.Lock() +// defer t.Unlock() - if (op == OPnot && validKeyNum != 1) || - (op != OPnot && validKeyNum < 2) { - return // with not enough existing source key - } +// drop = db.bDelete(t, key) +// db.rmExpire(t, BitType, key) - var srcIdx int - for srcIdx = 0; srcIdx < keyNum; srcIdx++ { - if srckeys[srcIdx] != nil { - break - } - } +// err = t.Commit() +// return +// } - // init - data - var segments = make([][]byte, maxDstSeq+1) +// func (db *DB) BSetBit(key []byte, offset int32, val uint8) (ori uint8, err error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - if op == OPnot { - // ps : - // ( ~num == num ^ 0x11111111 ) - // we init the result segments with all bit set, - // then we can calculate through the way of 'xor'. +// if err = checkKeySize(key); err != nil { +// return +// } - // ahead segments bin format : 1111 ... 
1111 - for i := uint32(0); i < maxDstSeq; i++ { - segments[i] = fillSegment - } +// // todo : check offset +// var seq, off uint32 +// if seq, off, err = db.bParseOffset(key, offset); err != nil { +// return 0, err +// } - // last segment bin format : 1111..1100..0000 - var tailSeg = make([]byte, segByteSize, segByteSize) - var fillByte = fillBits[7] - var tailSegLen = db.bCapByteSize(uint32(0), maxDstOff) - for i := uint32(0); i < tailSegLen-1; i++ { - tailSeg[i] = fillByte - } - tailSeg[tailSegLen-1] = fillBits[maxDstOff-(tailSegLen-1)<<3] - segments[maxDstSeq] = tailSeg +// var bk, segment []byte +// if bk, segment, err = db.bAllocateSegment(key, seq); err != nil { +// return 0, err +// } - } else { - // ps : init segments by data corresponding to the 1st valid source key - it := db.bIterator(srckeys[srcIdx]) - for ; it.Valid(); it.Next() { - if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { - // to do ... - it.Close() - return - } - segments[seq] = it.Value() - } - it.Close() - srcIdx++ - } +// if segment != nil { +// ori = getBit(segment, off) +// if setBit(segment, off, val) { +// t := db.binBatch +// t.Lock() +// defer t.Unlock() - // operation with following keys - var res []byte - for i := srcIdx; i < keyNum; i++ { - if srckeys[i] == nil { - continue - } +// t.Put(bk, segment) +// if _, _, e := db.bUpdateMeta(t, key, seq, off); e != nil { +// err = e +// return +// } - it := db.bIterator(srckeys[i]) - for idx, end := uint32(0), false; !end; it.Next() { - end = !it.Valid() - if !end { - if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { - // to do ... 
- it.Close() - return - } - } else { - seq = maxDstSeq + 1 - } +// err = t.Commit() +// } +// } - // todo : - // operation 'and' can be optimize here : - // if seq > max_segments_idx, this loop can be break, - // which can avoid cost from Key() and bDecodeBinKey() +// return +// } - for ; idx < seq; idx++ { - res = nil - exeOp(segments[idx], nil, &res) - segments[idx] = res - } +// func (db *DB) BMSetBit(key []byte, args ...BitPair) (place int64, err error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") - if !end { - res = it.Value() - exeOp(segments[seq], res, &res) - segments[seq] = res - idx++ - } - } - it.Close() - } +// if err = checkKeySize(key); err != nil { +// return +// } - // clear the old data in case - db.bDelete(t, dstkey) - db.rmExpire(t, BitType, dstkey) +// // (ps : so as to aviod wasting memory copy while calling db.Get() and batch.Put(), +// // here we sequence the params by pos, so that we can merge the execution of +// // diff pos setting which targets on the same segment respectively. 
) - // set data - db.bSetMeta(t, dstkey, maxDstSeq, maxDstOff) +// // #1 : sequence request data +// var argCnt = len(args) +// var bitInfos segBitInfoArray = make(segBitInfoArray, argCnt) +// var seq, off uint32 - var bk []byte - for seq, segt := range segments { - if segt != nil { - bk = db.bEncodeBinKey(dstkey, uint32(seq)) - t.Put(bk, segt) - } - } +// for i, info := range args { +// if seq, off, err = db.bParseOffset(key, info.Pos); err != nil { +// return +// } - err = t.Commit() - if err == nil { - // blen = int32(db.bCapByteSize(maxDstOff, maxDstOff)) - blen = int32(maxDstSeq< 0 { +// if _, _, err = db.bUpdateMeta(t, key, maxSeq, maxOff); err != nil { +// return +// } - t := db.binBatch - t.Lock() - defer t.Unlock() +// err = t.Commit() +// } - n, err := db.rmExpire(t, BitType, key) - if err != nil { - return 0, err - } +// return +// } - err = t.Commit() - return n, err -} +// func (db *DB) BGetBit(key []byte, offset int32) (uint8, error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") -func (db *DB) bFlush() (drop int64, err error) { - t := db.binBatch - t.Lock() - defer t.Unlock() +// if seq, off, err := db.bParseOffset(key, offset); err != nil { +// return 0, err +// } else { +// _, segment, err := db.bGetSegment(key, seq) +// if err != nil { +// return 0, err +// } - return db.flushType(t, BitType) -} +// if segment == nil { +// return 0, nil +// } else { +// return getBit(segment, off), nil +// } +// } +// } + +// // func (db *DB) BGetRange(key []byte, start int32, end int32) ([]byte, error) { +// // section := make([]byte) + +// // return +// // } + +// func (db *DB) BCount(key []byte, start int32, end int32) (cnt int32, err error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") + +// var sseq, soff uint32 +// if sseq, soff, err = db.bParseOffset(key, start); err != nil { +// return +// } + +// var eseq, eoff uint32 +// if eseq, eoff, err = 
db.bParseOffset(key, end); err != nil { +// return +// } + +// if sseq > eseq || (sseq == eseq && soff > eoff) { +// sseq, eseq = eseq, sseq +// soff, eoff = eoff, soff +// } + +// var segCnt int32 +// if eseq == sseq { +// if segCnt, err = db.bCountSeg(key, sseq, soff, eoff); err != nil { +// return 0, err +// } + +// cnt = segCnt + +// } else { +// if segCnt, err = db.bCountSeg(key, sseq, soff, segBitSize-1); err != nil { +// return 0, err +// } else { +// cnt += segCnt +// } + +// if segCnt, err = db.bCountSeg(key, eseq, 0, eoff); err != nil { +// return 0, err +// } else { +// cnt += segCnt +// } +// } + +// // middle segs +// var segment []byte +// skey := db.bEncodeBinKey(key, sseq) +// ekey := db.bEncodeBinKey(key, eseq) + +// it := db.bucket.RangeIterator(skey, ekey, store.RangeOpen) +// for ; it.Valid(); it.Next() { +// segment = it.RawValue() +// for _, bt := range segment { +// cnt += bitsInByte[bt] +// } +// } +// it.Close() + +// return +// } + +// func (db *DB) BTail(key []byte) (int32, error) { +// log.Error("bitmap type will be deprecated later, please use bit operations in kv type") + +// // effective length of data, the highest bit-pos set in history +// tailSeq, tailOff, err := db.bGetMeta(key) +// if err != nil { +// return 0, err +// } + +// tail := int32(-1) +// if tailSeq >= 0 { +// tail = int32(uint32(tailSeq)< maxDstSeq || (seq == maxDstSeq && off > maxDstOff) { +// maxDstSeq = seq +// maxDstOff = off +// } +// } + +// if (op == OPnot && validKeyNum != 1) || +// (op != OPnot && validKeyNum < 2) { +// return // with not enough existing source key +// } + +// var srcIdx int +// for srcIdx = 0; srcIdx < keyNum; srcIdx++ { +// if srckeys[srcIdx] != nil { +// break +// } +// } + +// // init - data +// var segments = make([][]byte, maxDstSeq+1) + +// if op == OPnot { +// // ps : +// // ( ~num == num ^ 0x11111111 ) +// // we init the result segments with all bit set, +// // then we can calculate through the way of 'xor'. 
+ +// // ahead segments bin format : 1111 ... 1111 +// for i := uint32(0); i < maxDstSeq; i++ { +// segments[i] = fillSegment +// } + +// // last segment bin format : 1111..1100..0000 +// var tailSeg = make([]byte, segByteSize, segByteSize) +// var fillByte = fillBits[7] +// var tailSegLen = db.bCapByteSize(uint32(0), maxDstOff) +// for i := uint32(0); i < tailSegLen-1; i++ { +// tailSeg[i] = fillByte +// } +// tailSeg[tailSegLen-1] = fillBits[maxDstOff-(tailSegLen-1)<<3] +// segments[maxDstSeq] = tailSeg + +// } else { +// // ps : init segments by data corresponding to the 1st valid source key +// it := db.bIterator(srckeys[srcIdx]) +// for ; it.Valid(); it.Next() { +// if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { +// // to do ... +// it.Close() +// return +// } +// segments[seq] = it.Value() +// } +// it.Close() +// srcIdx++ +// } + +// // operation with following keys +// var res []byte +// for i := srcIdx; i < keyNum; i++ { +// if srckeys[i] == nil { +// continue +// } + +// it := db.bIterator(srckeys[i]) +// for idx, end := uint32(0), false; !end; it.Next() { +// end = !it.Valid() +// if !end { +// if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil { +// // to do ... 
+// it.Close() +// return +// } +// } else { +// seq = maxDstSeq + 1 +// } + +// // todo : +// // operation 'and' can be optimize here : +// // if seq > max_segments_idx, this loop can be break, +// // which can avoid cost from Key() and bDecodeBinKey() + +// for ; idx < seq; idx++ { +// res = nil +// exeOp(segments[idx], nil, &res) +// segments[idx] = res +// } + +// if !end { +// res = it.Value() +// exeOp(segments[seq], res, &res) +// segments[seq] = res +// idx++ +// } +// } +// it.Close() +// } + +// // clear the old data in case +// db.bDelete(t, dstkey) +// db.rmExpire(t, BitType, dstkey) + +// // set data +// db.bSetMeta(t, dstkey, maxDstSeq, maxDstOff) + +// var bk []byte +// for seq, segt := range segments { +// if segt != nil { +// bk = db.bEncodeBinKey(dstkey, uint32(seq)) +// t.Put(bk, segt) +// } +// } + +// err = t.Commit() +// if err == nil { +// // blen = int32(db.bCapByteSize(maxDstOff, maxDstOff)) +// blen = int32(maxDstSeq<>1) - for i := 0; i < len(pairs); i++ { - offset, err = ledis.StrInt32(args[i<<1], nil) +// pairs := make([]ledis.BitPair, len(args)>>1) +// for i := 0; i < len(pairs); i++ { +// offset, err = ledis.StrInt32(args[i<<1], nil) - if err != nil { - return ErrOffset - } +// if err != nil { +// return ErrOffset +// } - val, err = ledis.StrInt8(args[i<<1+1], nil) - if val != 0 && val != 1 { - return ErrBool - } +// val, err = ledis.StrInt8(args[i<<1+1], nil) +// if val != 0 && val != 1 { +// return ErrBool +// } - if err != nil { - return ErrBool - } +// if err != nil { +// return ErrBool +// } - pairs[i].Pos = offset - pairs[i].Val = uint8(val) - } +// pairs[i].Pos = offset +// pairs[i].Val = uint8(val) +// } - if place, err := c.db.BMSetBit(key, pairs...); err != nil { - return err - } else { - c.resp.writeInteger(place) - } - return nil -} +// if place, err := c.db.BMSetBit(key, pairs...); err != nil { +// return err +// } else { +// c.resp.writeInteger(place) +// } +// return nil +// } -func bcountCommand(c *client) error { - 
args := c.args - argCnt := len(args) +// func bcountCommand(c *client) error { +// args := c.args +// argCnt := len(args) - if !(argCnt > 0 && argCnt <= 3) { - return ErrCmdParams - } +// if !(argCnt > 0 && argCnt <= 3) { +// return ErrCmdParams +// } - // BCount(key []byte, start int32, end int32) (cnt int32, err error) { +// // BCount(key []byte, start int32, end int32) (cnt int32, err error) { - var err error - var start, end int32 = 0, -1 +// var err error +// var start, end int32 = 0, -1 - if argCnt > 1 { - start, err = ledis.StrInt32(args[1], nil) - if err != nil { - return ErrValue - } - } +// if argCnt > 1 { +// start, err = ledis.StrInt32(args[1], nil) +// if err != nil { +// return ErrValue +// } +// } - if argCnt > 2 { - end, err = ledis.StrInt32(args[2], nil) - if err != nil { - return ErrValue - } - } +// if argCnt > 2 { +// end, err = ledis.StrInt32(args[2], nil) +// if err != nil { +// return ErrValue +// } +// } - if cnt, err := c.db.BCount(args[0], start, end); err != nil { - return err - } else { - c.resp.writeInteger(int64(cnt)) - } - return nil -} +// if cnt, err := c.db.BCount(args[0], start, end); err != nil { +// return err +// } else { +// c.resp.writeInteger(int64(cnt)) +// } +// return nil +// } -func boptCommand(c *client) error { - args := c.args - if len(args) < 2 { - return ErrCmdParams - } +// func boptCommand(c *client) error { +// args := c.args +// if len(args) < 2 { +// return ErrCmdParams +// } - opDesc := strings.ToLower(hack.String(args[0])) - dstKey := args[1] - srcKeys := args[2:] +// opDesc := strings.ToLower(hack.String(args[0])) +// dstKey := args[1] +// srcKeys := args[2:] - var op uint8 - switch opDesc { - case "and": - op = ledis.OPand - case "or": - op = ledis.OPor - case "xor": - op = ledis.OPxor - case "not": - op = ledis.OPnot - default: - return ErrCmdParams - } +// var op uint8 +// switch opDesc { +// case "and": +// op = ledis.OPand +// case "or": +// op = ledis.OPor +// case "xor": +// op = ledis.OPxor +// case 
"not": +// op = ledis.OPnot +// default: +// return ErrCmdParams +// } - if len(srcKeys) == 0 { - return ErrCmdParams - } - if blen, err := c.db.BOperation(op, dstKey, srcKeys...); err != nil { - return err - } else { - c.resp.writeInteger(int64(blen)) - } - return nil -} +// if len(srcKeys) == 0 { +// return ErrCmdParams +// } +// if blen, err := c.db.BOperation(op, dstKey, srcKeys...); err != nil { +// return err +// } else { +// c.resp.writeInteger(int64(blen)) +// } +// return nil +// } -func bexpireCommand(c *client) error { - args := c.args - if len(args) != 2 { - return ErrCmdParams - } +// func bexpireCommand(c *client) error { +// args := c.args +// if len(args) != 2 { +// return ErrCmdParams +// } - duration, err := ledis.StrInt64(args[1], nil) - if err != nil { - return ErrValue - } +// duration, err := ledis.StrInt64(args[1], nil) +// if err != nil { +// return ErrValue +// } - if v, err := c.db.BExpire(args[0], duration); err != nil { - return err - } else { - c.resp.writeInteger(v) - } +// if v, err := c.db.BExpire(args[0], duration); err != nil { +// return err +// } else { +// c.resp.writeInteger(v) +// } - return nil -} +// return nil +// } -func bexpireAtCommand(c *client) error { - args := c.args - if len(args) != 2 { - return ErrCmdParams - } +// func bexpireAtCommand(c *client) error { +// args := c.args +// if len(args) != 2 { +// return ErrCmdParams +// } - when, err := ledis.StrInt64(args[1], nil) - if err != nil { - return ErrValue - } +// when, err := ledis.StrInt64(args[1], nil) +// if err != nil { +// return ErrValue +// } - if v, err := c.db.BExpireAt(args[0], when); err != nil { - return err - } else { - c.resp.writeInteger(v) - } +// if v, err := c.db.BExpireAt(args[0], when); err != nil { +// return err +// } else { +// c.resp.writeInteger(v) +// } - return nil -} +// return nil +// } -func bttlCommand(c *client) error { - args := c.args - if len(args) != 1 { - return ErrCmdParams - } +// func bttlCommand(c *client) error { +// args 
:= c.args +// if len(args) != 1 { +// return ErrCmdParams +// } - if v, err := c.db.BTTL(args[0]); err != nil { - return err - } else { - c.resp.writeInteger(v) - } +// if v, err := c.db.BTTL(args[0]); err != nil { +// return err +// } else { +// c.resp.writeInteger(v) +// } - return nil -} +// return nil +// } -func bpersistCommand(c *client) error { - args := c.args - if len(args) != 1 { - return ErrCmdParams - } +// func bpersistCommand(c *client) error { +// args := c.args +// if len(args) != 1 { +// return ErrCmdParams +// } - if n, err := c.db.BPersist(args[0]); err != nil { - return err - } else { - c.resp.writeInteger(n) - } +// if n, err := c.db.BPersist(args[0]); err != nil { +// return err +// } else { +// c.resp.writeInteger(n) +// } - return nil -} +// return nil +// } -func init() { - register("bget", bgetCommand) - register("bdelete", bdeleteCommand) - register("bsetbit", bsetbitCommand) - register("bgetbit", bgetbitCommand) - register("bmsetbit", bmsetbitCommand) - register("bcount", bcountCommand) - register("bopt", boptCommand) - register("bexpire", bexpireCommand) - register("bexpireat", bexpireAtCommand) - register("bttl", bttlCommand) - register("bpersist", bpersistCommand) -} +// func init() { +// register("bget", bgetCommand) +// register("bdelete", bdeleteCommand) +// register("bsetbit", bsetbitCommand) +// register("bgetbit", bgetbitCommand) +// register("bmsetbit", bmsetbitCommand) +// register("bcount", bcountCommand) +// register("bopt", boptCommand) +// register("bexpire", bexpireCommand) +// register("bexpireat", bexpireAtCommand) +// register("bttl", bttlCommand) +// register("bpersist", bpersistCommand) +// }