// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"encoding/binary"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

// ErrBatchCorrupted records reason of batch corruption.
type ErrBatchCorrupted struct {
	Reason string
}

func (e *ErrBatchCorrupted) Error() string {
	return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
}

func newErrBatchCorrupted(reason string) error {
	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
}

const (
	batchHdrLen  = 8 + 4
	batchGrowRec = 3000
)
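
// Reading aid, derived from encode and appendRec below (not an authoritative
// format specification): an encoded batch is laid out as
//
//	seq    uint64, little endian (8 bytes)
//	count  uint32, little endian (4 bytes)
//	then, for each of the count records:
//		keyType       1 byte (keyTypeDel or keyTypeVal)
//		key length    uvarint
//		key           raw bytes
//		value length  uvarint   (keyTypeVal only)
//		value         raw bytes (keyTypeVal only)
//
// batchHdrLen above is the size of the fixed seq+count header.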

// BatchReplay wraps basic batch operations.
type BatchReplay interface {
	Put(key, value []byte)
	Delete(key []byte)
}

// Batch is a write batch.
type Batch struct {
	data       []byte
	rLen, bLen int
	seq        uint64
	sync       bool
}

func (b *Batch) grow(n int) {
	off := len(b.data)
	if off == 0 {
		off = batchHdrLen
		if b.data != nil {
			b.data = b.data[:off]
		}
	}
	if cap(b.data)-off < n {
		if b.data == nil {
			b.data = make([]byte, off, off+n)
		} else {
			odata := b.data
			div := 1
			if b.rLen > batchGrowRec {
				div = b.rLen / batchGrowRec
			}
			b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
			copy(b.data, odata)
		}
	}
}

func (b *Batch) appendRec(kt keyType, key, value []byte) {
	n := 1 + binary.MaxVarintLen32 + len(key)
	if kt == keyTypeVal {
		n += binary.MaxVarintLen32 + len(value)
	}
	b.grow(n)
	off := len(b.data)
	data := b.data[:off+n]
	data[off] = byte(kt)
	off++
	off += binary.PutUvarint(data[off:], uint64(len(key)))
	copy(data[off:], key)
	off += len(key)
	if kt == keyTypeVal {
		off += binary.PutUvarint(data[off:], uint64(len(value)))
		copy(data[off:], value)
		off += len(value)
	}
	b.data = data[:off]
	b.rLen++
	// Include 8-byte ikey header
	b.bLen += len(key) + len(value) + 8
}

// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
	b.appendRec(keyTypeVal, key, value)
}

// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
	b.appendRec(keyTypeDel, key, nil)
}

// Dump dumps batch contents. The returned slice can be loaded into the
// batch using the Load method.
// The returned slice is not a copy, so its contents should not be modified.
func (b *Batch) Dump() []byte {
	return b.encode()
}

// Load loads the given slice into the batch. Previous contents of the batch
// will be discarded.
// The given slice will not be copied and will be used as the batch buffer,
// so it is not safe to modify the contents of the slice.
func (b *Batch) Load(data []byte) error {
	return b.decode(0, data)
}
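
// Illustrative sketch of the Dump/Load round trip described above. The
// buildAndReload helper is hypothetical and not part of the package API; it
// only shows that the dumped bytes can seed another batch.
func buildAndReload() (*Batch, error) {
	src := new(Batch)
	src.Put([]byte("k1"), []byte("v1"))
	src.Delete([]byte("k2"))

	// Dump returns the batch's internal buffer; Load adopts it without
	// copying, so src and dst share the same backing slice afterwards.
	dst := new(Batch)
	if err := dst.Load(src.Dump()); err != nil {
		return nil, err
	}
	return dst, nil
}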

// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
		switch kt {
		case keyTypeVal:
			r.Put(key, value)
		case keyTypeDel:
			r.Delete(key)
		}
		return nil
	})
}
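
// Illustrative sketch of a BatchReplay implementation: a counter that tallies
// the operations Replay dispatches. Both batchOpCounter and countBatchOps are
// hypothetical names used only for demonstration, not part of the package API.
type batchOpCounter struct {
	puts, deletes int
}

func (c *batchOpCounter) Put(key, value []byte) { c.puts++ }

func (c *batchOpCounter) Delete(key []byte) { c.deletes++ }

func countBatchOps(b *Batch) (int, int, error) {
	var c batchOpCounter
	if err := b.Replay(&c); err != nil {
		return 0, 0, err
	}
	return c.puts, c.deletes, nil
}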

// Len returns number of records in the batch.
func (b *Batch) Len() int {
	return b.rLen
}

// Reset resets the batch.
func (b *Batch) Reset() {
	b.data = b.data[:0]
	b.seq = 0
	b.rLen = 0
	b.bLen = 0
	b.sync = false
}

func (b *Batch) init(sync bool) {
	b.sync = sync
}

func (b *Batch) append(p *Batch) {
	if p.rLen > 0 {
		b.grow(len(p.data) - batchHdrLen)
		b.data = append(b.data, p.data[batchHdrLen:]...)
		b.rLen += p.rLen
		b.bLen += p.bLen
	}
	if p.sync {
		b.sync = true
	}
}

// size returns the sum of the key/value pair lengths plus an 8-byte ikey
// overhead per record.
func (b *Batch) size() int {
	return b.bLen
}

func (b *Batch) encode() []byte {
	b.grow(0)
	binary.LittleEndian.PutUint64(b.data, b.seq)
	binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))

	return b.data
}

func (b *Batch) decode(prevSeq uint64, data []byte) error {
	if len(data) < batchHdrLen {
		return newErrBatchCorrupted("too short")
	}

	b.seq = binary.LittleEndian.Uint64(data)
	if b.seq < prevSeq {
		return newErrBatchCorrupted("invalid sequence number")
	}
	b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
	if b.rLen < 0 {
		return newErrBatchCorrupted("invalid records length")
	}
	// No need to be precise at this point; it won't be used anyway.
	b.bLen = len(data) - batchHdrLen
	b.data = data

	return nil
}

func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
	off := batchHdrLen
	for i := 0; i < b.rLen; i++ {
		if off >= len(b.data) {
			return newErrBatchCorrupted("invalid records length")
		}

		kt := keyType(b.data[off])
		if kt > keyTypeVal {
			return newErrBatchCorrupted("bad record: invalid type")
		}
		off++

		x, n := binary.Uvarint(b.data[off:])
		off += n
		if n <= 0 || off+int(x) > len(b.data) {
			return newErrBatchCorrupted("bad record: invalid key length")
		}
		key := b.data[off : off+int(x)]
		off += int(x)
		var value []byte
		if kt == keyTypeVal {
			x, n := binary.Uvarint(b.data[off:])
			off += n
			if n <= 0 || off+int(x) > len(b.data) {
				return newErrBatchCorrupted("bad record: invalid value length")
			}
			value = b.data[off : off+int(x)]
			off += int(x)
		}

		if err := f(i, kt, key, value); err != nil {
			return err
		}
	}

	return nil
}

func (b *Batch) memReplay(to *memdb.DB) error {
	var ikScratch []byte
	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
		ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
		return to.Put(ikScratch, value)
	})
}

func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
	if err := b.decode(prevSeq, data); err != nil {
		return err
	}
	return b.memReplay(to)
}

func (b *Batch) revertMemReplay(to *memdb.DB) error {
	var ikScratch []byte
	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
		ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
		return to.Delete(ikScratch)
	})
}