package rpl

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"reflect"
	"sync"
	"time"

	"github.com/edsrzf/mmap-go"
	"github.com/siddontang/go/log"
	"github.com/siddontang/go/num"
	"github.com/siddontang/go/sync2"
)

var (
	magic = []byte("\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17")

	log0     = Log{0, 1, 1, []byte("ledisdb")}
	log0Data = []byte{}

	errTableNeedFlush = errors.New("write table need flush")
	errNilHandler     = errors.New("nil write handler")

	pageSize = int64(4096)
)

func init() {
	log0Data, _ = log0.Marshal()

	pageSize = int64(os.Getpagesize())
}
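
// A table file produced by tableWriter.Flush and read back by tableReader
// has the following layout:
//
//	log 1 | log 2 | ... | log N | log0 marker | zero padding up to a page
//	boundary | offset index (4 bytes, big endian, per log) | offsetStartPos
//	(int64, big endian) | offsetLen (uint32, big endian) | magic (20 bytes)
//
// offsetStartPos is page aligned so the offset index can be memory mapped
// directly; the final 32 bytes form the footer that check validates.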

const tableReaderKeepaliveInterval int64 = 30

func fmtTableName(index int64) string {
	return fmt.Sprintf("%08d.ldb", index)
}
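
// tableReader provides read-only access to a single, already flushed table
// file. It keeps the offset index memory mapped and records the last read
// time so idle readers can be detected via Keepalived.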
type tableReader struct {
	sync.Mutex

	name  string
	index int64

	f *os.File
	m mmap.MMap

	pf *os.File

	first uint64
	last  uint64

	lastTime uint32

	offsetStartPos int64
	offsetLen      uint32

	lastReadTime sync2.AtomicInt64
}
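
// newTableReader opens the table file for the given index and validates it
// with check; if validation fails it tries to rebuild the file with repair.
// The underlying file and mmap are closed again before returning and are
// reopened lazily on the first GetLog call.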
func newTableReader(base string, index int64) (*tableReader, error) {
	if index <= 0 {
		return nil, fmt.Errorf("invalid index %d", index)
	}

	t := new(tableReader)
	t.name = path.Join(base, fmtTableName(index))
	t.index = index

	var err error

	if err = t.check(); err != nil {
		log.Error("check %s error: %s, try to repair", t.name, err.Error())

		if err = t.repair(); err != nil {
			log.Error("repair %s error: %s", t.name, err.Error())
			return nil, err
		}
	}

	t.close()

	return t, nil
}

func (t *tableReader) Close() {
	t.Lock()
	defer t.Unlock()

	t.close()
}

func (t *tableReader) close() {
	if t.m != nil {
		t.m.Unmap()
		t.m = nil
	}

	if t.f != nil {
		t.f.Close()
		t.f = nil
	}
}
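
// Keepalived reports whether this reader has served a read within the last
// tableReaderKeepaliveInterval seconds; idle readers can then be closed by
// the owner to release the file handle and the mmap.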
func (t *tableReader) Keepalived() bool {
	l := t.lastReadTime.Get()
	if l > 0 && time.Now().Unix()-l > tableReaderKeepaliveInterval {
		return false
	}

	return true
}
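
// getLogPos returns the file position of the index-th log in this table,
// read from the memory-mapped offset index (4 bytes per entry, big endian).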
func (t *tableReader) getLogPos(index int) (uint32, error) {
	// Alternative implementation kept for reference: read the position
	// through t.pf instead of the mmapped offset index.
	//
	// if _, err := t.pf.Seek(t.offsetStartPos+int64(index*4), os.SEEK_SET); err != nil {
	// 	return 0, err
	// }
	//
	// var pos uint32
	// if err := binary.Read(t.pf, binary.BigEndian, &pos); err != nil {
	// 	return 0, err
	// }
	// return pos, nil

	return binary.BigEndian.Uint32(t.m[index*4:]), nil
}
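
// check opens the table file and validates it: the footer fields (offset
// start position, offset length, magic), the memory-mapped offset index, the
// first and last log heads, and the trailing log0 marker. On success it
// fills in first, last and lastTime.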
func (t *tableReader) check() error {
	var err error

	if t.f, err = os.Open(t.name); err != nil {
		return err
	}

	st, _ := t.f.Stat()

	if st.Size() < 32 {
		return fmt.Errorf("file size %d too short", st.Size())
	}

	var pos int64
	if pos, err = t.f.Seek(-32, os.SEEK_END); err != nil {
		return err
	}

	if err = binary.Read(t.f, binary.BigEndian, &t.offsetStartPos); err != nil {
		return err
	} else if t.offsetStartPos >= st.Size() {
		return fmt.Errorf("invalid offset start pos %d, file size %d", t.offsetStartPos, st.Size())
	} else if t.offsetStartPos%pageSize != 0 {
		return fmt.Errorf("invalid offset start pos %d, must be a multiple of page size %d", t.offsetStartPos, pageSize)
	}

	if err = binary.Read(t.f, binary.BigEndian, &t.offsetLen); err != nil {
		return err
	} else if int64(t.offsetLen) >= st.Size() || t.offsetLen == 0 {
		return fmt.Errorf("invalid offset len %d, file size %d", t.offsetLen, st.Size())
	} else if t.offsetLen%4 != 0 {
		return fmt.Errorf("invalid offset len %d, must be a multiple of 4", t.offsetLen)
	}

	if t.offsetStartPos+int64(t.offsetLen) != pos {
		return fmt.Errorf("invalid offset %d %d", t.offsetStartPos, t.offsetLen)
	}

	b := make([]byte, 20)
	if _, err = t.f.Read(b); err != nil {
		return err
	} else if !bytes.Equal(b, magic) {
		return fmt.Errorf("invalid magic data %q", b)
	}

	if t.m, err = mmap.MapRegion(t.f, int(t.offsetLen), mmap.RDONLY, 0, t.offsetStartPos); err != nil {
		return err
	}

	firstLogPos, _ := t.getLogPos(0)
	lastLogPos, _ := t.getLogPos(int(t.offsetLen/4 - 1))

	if firstLogPos != 0 {
		return fmt.Errorf("invalid first log pos %d, must be 0", firstLogPos)
	} else if int64(lastLogPos) > t.offsetStartPos {
		return fmt.Errorf("invalid last log pos %d", lastLogPos)
	}

	var l Log
	if _, err = t.decodeLogHead(&l, int64(firstLogPos)); err != nil {
		return fmt.Errorf("decode first log err %s", err.Error())
	}

	t.first = l.ID

	var n int64
	if n, err = t.decodeLogHead(&l, int64(lastLogPos)); err != nil {
		return fmt.Errorf("decode last log err %s", err.Error())
	} else {
		var l0 Log
		if _, err := t.f.Seek(n, os.SEEK_SET); err != nil {
			return fmt.Errorf("seek log0 err %s", err.Error())
		} else if err = l0.Decode(t.f); err != nil {
			println(lastLogPos, n, l0.ID, l0.CreateTime, l0.Compression)
			return fmt.Errorf("decode log0 err %s", err.Error())
		} else if !reflect.DeepEqual(l0, log0) {
			return fmt.Errorf("invalid log0 %#v != %#v", l0, log0)
		}
	}

	t.last = l.ID
	t.lastTime = l.CreateTime

	if t.first > t.last {
		return fmt.Errorf("invalid log table first %d > last %d", t.first, t.last)
	} else if (t.last - t.first + 1) != uint64(t.offsetLen/4) {
		return fmt.Errorf("invalid log table, first %d, last %d, and log num %d", t.first, t.last, t.offsetLen/4)
	}

	return nil
}
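
// repair rebuilds a damaged table by decoding logs sequentially from the
// start of the file into a temporary table written with tableWriter, then
// renaming the flushed result over the original file. If the trailing log0
// marker is never reached it logs a warning, since some logs at the tail may
// be missing.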
func (t *tableReader) repair() error {
	t.close()

	var err error
	if t.f, err = os.Open(t.name); err != nil {
		return err
	}
	defer t.close()

	st, _ := t.f.Stat()
	size := st.Size()

	if size == 0 {
		return fmt.Errorf("empty file, can not be repaired")
	}

	tw := newTableWriter(path.Dir(t.name), t.index, maxLogFileSize)

	tmpName := tw.name + ".tmp"
	tw.name = tmpName
	os.Remove(tmpName)

	defer func() {
		tw.Close()
		os.Remove(tmpName)
	}()

	var l Log

	for {
		lastPos, _ := t.f.Seek(0, os.SEEK_CUR)
		if lastPos == size {
			// No more data and the log0 marker was never reached: some logs
			// at the tail may be missing, but there is no way to recover them.
			log.Error("no more data, maybe missing some logs, use at your own risk!!!")
			break
		}

		if err := l.Decode(t.f); err != nil {
			return err
		}

		if l.ID == 0 {
			break
		}

		t.lastTime = l.CreateTime

		if err := tw.StoreLog(&l); err != nil {
			return err
		}
	}

	t.close()

	var tr *tableReader
	if tr, err = tw.Flush(); err != nil {
		return err
	}

	t.first = tr.first
	t.last = tr.last
	t.offsetStartPos = tr.offsetStartPos
	t.offsetLen = tr.offsetLen

	defer tr.Close()

	os.Remove(t.name)

	if err := os.Rename(tmpName, t.name); err != nil {
		return err
	}

	return nil
}
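
// decodeLogHead decodes only the fixed-size head of the log stored at pos
// and returns the file offset immediately after that log's data, i.e. the
// position of the next log.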
func (t *tableReader) decodeLogHead(l *Log, pos int64) (int64, error) {
	_, err := t.f.Seek(pos, os.SEEK_SET)
	if err != nil {
		return 0, err
	}

	dataLen, err := l.DecodeHead(t.f)
	if err != nil {
		return 0, err
	}

	return pos + int64(l.HeadSize()) + int64(dataLen), nil
}
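
// GetLog reads the log with the given id into l, returning ErrLogNotFound
// when id falls outside [first, last]. The file and the offset index mmap
// are reopened lazily if the reader was closed.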
func (t *tableReader) GetLog(id uint64, l *Log) error {
	if id < t.first || id > t.last {
		return ErrLogNotFound
	}

	t.lastReadTime.Set(time.Now().Unix())

	t.Lock()
	defer t.Unlock()

	if err := t.openTable(); err != nil {
		t.close()
		return err
	}

	pos, err := t.getLogPos(int(id - t.first))
	if err != nil {
		return err
	}

	if _, err := t.f.Seek(int64(pos), os.SEEK_SET); err != nil {
		return err
	}

	if err := l.Decode(t.f); err != nil {
		return err
	} else if l.ID != id {
		return fmt.Errorf("invalid log id %d != %d", l.ID, id)
	}

	return nil
}

func (t *tableReader) openTable() error {
	var err error

	if t.f == nil {
		if t.f, err = os.Open(t.name); err != nil {
			return err
		}
	}

	if t.m == nil {
		if t.m, err = mmap.MapRegion(t.f, int(t.offsetLen), mmap.RDONLY, 0, t.offsetStartPos); err != nil {
			return err
		}
	}

	return nil
}
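
// tableWriter appends logs to the currently active table file. Logs are
// written sequentially to wf while their file offsets are accumulated in
// offsetBuf; Flush writes the log0 marker, padding, offset index and footer,
// producing a read-only tableReader for the finished file.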
type tableWriter struct {
	sync.RWMutex

	wf *os.File
	rf *os.File

	rm sync.Mutex

	base  string
	name  string
	index int64

	first uint64
	last  uint64

	offsetBuf []byte

	maxLogSize int64

	closed bool

	syncType int

	lastTime uint32
}
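
// newTableWriter creates a writer for the table file with the given index
// under base. The file itself is created lazily on the first StoreLog call.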
func newTableWriter(base string, index int64, maxLogSize int64) *tableWriter {
	if index <= 0 {
		panic(fmt.Errorf("invalid index %d", index))
	}

	t := new(tableWriter)

	t.base = base
	t.name = path.Join(base, fmtTableName(index))
	t.index = index

	t.maxLogSize = maxLogSize
	t.closed = false

	return t
}

func (t *tableWriter) SetMaxLogSize(s int64) {
	t.maxLogSize = s
}
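
// SetSyncType selects the fsync policy for StoreLog: with a syncType of 2
// every stored log is followed by a Sync of the write handle; other values
// leave syncing to explicit Sync calls.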
func (t *tableWriter) SetSyncType(tp int) {
	t.syncType = tp
}

func (t *tableWriter) close() {
	if t.rf != nil {
		t.rf.Close()
		t.rf = nil
	}

	if t.wf != nil {
		t.wf.Close()
		t.wf = nil
	}
}

func (t *tableWriter) Close() {
	t.Lock()
	defer t.Unlock()

	t.closed = true

	t.close()
}

func (t *tableWriter) First() uint64 {
	t.Lock()
	id := t.first
	t.Unlock()
	return id
}

func (t *tableWriter) Last() uint64 {
	t.Lock()
	id := t.last
	t.Unlock()
	return id
}

func (t *tableWriter) reset() {
	t.close()

	t.first = 0
	t.last = 0
	t.index = t.index + 1
	t.name = path.Join(t.base, fmtTableName(t.index))
	t.offsetBuf = t.offsetBuf[0:0]
}
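
// Flush finalizes the current table file: it appends the log0 marker, pads
// the file to a page boundary, writes the offset index followed by the
// footer (offset start position, offset length, magic), and returns a
// tableReader describing the finished file. The writer is then reset to
// target the next table index.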
func (t *tableWriter) Flush() (*tableReader, error) {
	t.Lock()
	defer t.Unlock()

	if t.wf == nil {
		return nil, errNilHandler
	}

	defer t.reset()

	tr := new(tableReader)
	tr.name = t.name
	tr.index = t.index

	st, _ := t.wf.Stat()

	tr.first = t.first
	tr.last = t.last

	if n, err := t.wf.Write(log0Data); err != nil {
		return nil, fmt.Errorf("flush log0data error %s", err.Error())
	} else if n != len(log0Data) {
		return nil, fmt.Errorf("flush log0data only %d != %d", n, len(log0Data))
	}

	st, _ = t.wf.Stat()

	if m := st.Size() % pageSize; m != 0 {
		padding := pageSize - m
		if n, err := t.wf.Write(make([]byte, padding)); err != nil {
			return nil, fmt.Errorf("flush log padding error %s", err.Error())
		} else if n != int(padding) {
			return nil, fmt.Errorf("flush log padding error %d != %d", n, padding)
		}
	}

	st, _ = t.wf.Stat()

	if st.Size()%pageSize != 0 {
		return nil, fmt.Errorf("invalid offset start pos, %d", st.Size())
	}

	tr.offsetStartPos = st.Size()
	tr.offsetLen = uint32(len(t.offsetBuf))

	if n, err := t.wf.Write(t.offsetBuf); err != nil {
		log.Error("flush offset buffer error %s", err.Error())
		return nil, err
	} else if n != len(t.offsetBuf) {
		log.Error("flush offset buffer only %d != %d", n, len(t.offsetBuf))
		return nil, io.ErrShortWrite
	}

	if err := binary.Write(t.wf, binary.BigEndian, tr.offsetStartPos); err != nil {
		log.Error("flush offset start pos error %s", err.Error())
		return nil, err
	}

	if err := binary.Write(t.wf, binary.BigEndian, tr.offsetLen); err != nil {
		log.Error("flush offset len error %s", err.Error())
		return nil, err
	}

	if n, err := t.wf.Write(magic); err != nil {
		log.Error("flush magic data error %s", err.Error())
		return nil, err
	} else if n != len(magic) {
		log.Error("flush magic data only %d != %d", n, len(magic))
		return nil, io.ErrShortWrite
	}

	return tr, nil
}
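
// StoreLog appends l to the active table file, recording its offset in the
// in-memory offset buffer. IDs must be strictly sequential. It returns
// errTableNeedFlush once the table already holds more than maxLogNumInFile
// logs or its size has reached maxLogSize, signalling the caller to Flush
// and continue on the next table.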
func (t *tableWriter) StoreLog(l *Log) error {
	if l.ID == 0 {
		return ErrStoreLogID
	}

	t.Lock()
	defer t.Unlock()

	if t.closed {
		return fmt.Errorf("table writer is closed")
	}

	if t.last > 0 && l.ID != t.last+1 {
		return ErrStoreLogID
	}

	if t.last-t.first+1 > maxLogNumInFile {
		return errTableNeedFlush
	}

	var err error
	if t.wf == nil {
		if t.wf, err = os.OpenFile(t.name, os.O_CREATE|os.O_WRONLY, 0644); err != nil {
			return err
		}
	}

	if t.offsetBuf == nil {
		t.offsetBuf = make([]byte, 0, maxLogNumInFile*4)
	}

	st, _ := t.wf.Stat()
	if st.Size() >= t.maxLogSize {
		return errTableNeedFlush
	}

	offsetPos := uint32(st.Size())

	if err := l.Encode(t.wf); err != nil {
		return err
	}

	t.offsetBuf = append(t.offsetBuf, num.Uint32ToBytes(offsetPos)...)

	if t.first == 0 {
		t.first = l.ID
	}

	t.last = l.ID
	t.lastTime = l.CreateTime

	// TODO: add LRU cache

	if t.syncType == 2 {
		if err := t.wf.Sync(); err != nil {
			log.Error("sync table error %s", err.Error())
		}
	}

	return nil
}
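
// GetLog reads a log that is still held by the writer (not yet flushed),
// looking its position up in the in-memory offset buffer and decoding it
// through a separate read-only file handle so reads do not disturb the
// write position.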
func (t *tableWriter) GetLog(id uint64, l *Log) error {
	t.RLock()
	defer t.RUnlock()

	if id < t.first || id > t.last {
		return ErrLogNotFound
	}

	// TODO: memory cache

	offset := binary.BigEndian.Uint32(t.offsetBuf[(id-t.first)*4:])

	if err := t.getLog(l, int64(offset)); err != nil {
		return err
	} else if l.ID != id {
		return fmt.Errorf("invalid log id %d != %d", id, l.ID)
	}

	return nil
}

func (t *tableWriter) Sync() error {
	t.Lock()
	defer t.Unlock()

	if t.wf != nil {
		return t.wf.Sync()
	}

	return nil
}
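
// getLog decodes the log stored at file offset pos, lazily opening a
// separate read-only handle and serializing access to it with rm.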
func (t *tableWriter) getLog(l *Log, pos int64) error {
	t.rm.Lock()
	defer t.rm.Unlock()

	var err error
	if t.rf == nil {
		if t.rf, err = os.Open(t.name); err != nil {
			return err
		}
	}

	if _, err = t.rf.Seek(pos, os.SEEK_SET); err != nil {
		return err
	}

	if err = l.Decode(t.rf); err != nil {
		return err
	}

	return nil
}
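
// Illustrative usage sketch (not part of the original source; the real
// caller lives elsewhere in this package, and the literal path and the log
// variable l below are hypothetical):
//
//	tw := newTableWriter("/tmp/ldb-rpl", 1, maxLogFileSize)
//	err := tw.StoreLog(l)
//	if err == errTableNeedFlush {
//		// The active table is full: flush it to disk (which also resets
//		// tw to the next table index), then retry the write.
//		tr, _ := tw.Flush()
//		defer tr.Close()
//		err = tw.StoreLog(l)
//	}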