From ca344836ab73bdff9b84d75496862d9fa60c09ad Mon Sep 17 00:00:00 2001
From: siddontang
Date: Sun, 1 May 2016 23:33:02 +0800
Subject: [PATCH] use vendor and do clean up. (#241)

* use vendor and do clean up.
---
 .travis.yml | 12 +-
 Godeps/Godeps.json | 83 -
 Godeps/_workspace/.gitignore | 2 -
 .../toml/cmd/toml-test-decoder/COPYING | 14 -
 .../toml/cmd/toml-test-decoder/README.md | 14 -
 .../toml/cmd/toml-test-decoder/main.go | 90 -
 .../toml/cmd/toml-test-encoder/COPYING | 14 -
 .../toml/cmd/toml-test-encoder/README.md | 14 -
 .../toml/cmd/toml-test-encoder/main.go | 131 -
 .../BurntSushi/toml/cmd/tomlv/COPYING | 14 -
 .../BurntSushi/toml/cmd/tomlv/README.md | 22 -
 .../BurntSushi/toml/cmd/tomlv/main.go | 61 -
 .../github.com/BurntSushi/toml/decode_test.go | 950 --
 .../github.com/BurntSushi/toml/encode_test.go | 542 -
 .../src/github.com/boltdb/bolt/.gitignore | 4 -
 .../src/github.com/boltdb/bolt/Makefile | 54 -
 .../src/github.com/boltdb/bolt/README.md | 621 --
 .../src/github.com/boltdb/bolt/batch.go | 138 -
 .../boltdb/bolt/batch_benchmark_test.go | 170 -
 .../boltdb/bolt/batch_example_test.go | 148 -
 .../src/github.com/boltdb/bolt/batch_test.go | 167 -
 .../src/github.com/boltdb/bolt/bolt_386.go | 7 -
 .../src/github.com/boltdb/bolt/bolt_amd64.go | 7 -
 .../src/github.com/boltdb/bolt/bolt_arm.go | 7 -
 .../src/github.com/boltdb/bolt/bolt_linux.go | 12 -
 .../github.com/boltdb/bolt/bolt_openbsd.go | 29 -
 .../src/github.com/boltdb/bolt/bolt_test.go | 36 -
 .../src/github.com/boltdb/bolt/bolt_unix.go | 100 -
 .../github.com/boltdb/bolt/bolt_windows.go | 76 -
 .../github.com/boltdb/bolt/boltsync_unix.go | 10 -
 .../src/github.com/boltdb/bolt/bucket.go | 743 --
 .../src/github.com/boltdb/bolt/bucket_test.go | 1169 --
 .../github.com/boltdb/bolt/cmd/bolt/main.go | 1529 ---
 .../boltdb/bolt/cmd/bolt/main_test.go | 145 -
 .../src/github.com/boltdb/bolt/cursor.go | 384 -
 .../src/github.com/boltdb/bolt/cursor_test.go | 511 -
 .../src/github.com/boltdb/bolt/db.go | 792 --
 .../src/github.com/boltdb/bolt/db_test.go | 903 --
 .../src/github.com/boltdb/bolt/doc.go | 44 -
 .../src/github.com/boltdb/bolt/errors.go | 70 -
 .../src/github.com/boltdb/bolt/freelist.go | 242 -
 .../github.com/boltdb/bolt/freelist_test.go | 156 -
 .../src/github.com/boltdb/bolt/node.go | 636 --
 .../src/github.com/boltdb/bolt/node_test.go | 156 -
 .../src/github.com/boltdb/bolt/page.go | 172 -
 .../src/github.com/boltdb/bolt/page_test.go | 72 -
 .../src/github.com/boltdb/bolt/quick_test.go | 79 -
 .../github.com/boltdb/bolt/simulation_test.go | 327 -
 .../src/github.com/boltdb/bolt/tx.go | 611 --
 .../src/github.com/boltdb/bolt/tx_test.go | 456 -
 .../github.com/cupcake/rdb/decoder_test.go | 307 -
 .../github.com/cupcake/rdb/encoder_test.go | 43 -
 .../github.com/cupcake/rdb/examples/diff.go | 65 -
 .../cupcake/rdb/fixtures/dictionary.rdb | Bin 102032 -> 0 bytes
 .../easily_compressible_string_key.rdb | Bin 64 -> 0 bytes
 .../cupcake/rdb/fixtures/empty_database.rdb | 1 -
 .../cupcake/rdb/fixtures/hash_as_ziplist.rdb | Bin 85 -> 0 bytes
 .../cupcake/rdb/fixtures/integer_keys.rdb | Bin 182 -> 0 bytes
 .../cupcake/rdb/fixtures/intset_16.rdb | Bin 38 -> 0 bytes
 .../cupcake/rdb/fixtures/intset_32.rdb | Bin 44 -> 0 bytes
 .../cupcake/rdb/fixtures/intset_64.rdb | Bin 56 -> 0 bytes
 .../cupcake/rdb/fixtures/keys_with_expiry.rdb | Bin 71 -> 0 bytes
 .../cupcake/rdb/fixtures/linkedlist.rdb | Bin 51032 -> 0 bytes
 .../rdb/fixtures/multiple_databases.rdb | Bin 74 -> 0 bytes
 .../fixtures/rdb_version_5_with_checksum.rdb | Bin 128 -> 0 bytes
 .../cupcake/rdb/fixtures/regular_set.rdb | Bin 59 -> 0 bytes
 .../rdb/fixtures/regular_sorted_set.rdb | Bin 33471 -> 0 bytes
 .../rdb/fixtures/sorted_set_as_ziplist.rdb | Bin 178 -> 0 bytes
 .../fixtures/uncompressible_string_keys.rdb | Bin 32604 -> 0 bytes
 .../ziplist_that_compresses_easily.rdb | Bin 103 -> 0 bytes
 .../fixtures/ziplist_that_doesnt_compress.rdb | Bin 125 -> 0 bytes
 .../rdb/fixtures/ziplist_with_integers.rdb | Bin 130 -> 0 bytes
 .../zipmap_that_compresses_easily.rdb | Bin 73 -> 0 bytes
 .../fixtures/zipmap_that_doesnt_compress.rdb | Bin 60 -> 0 bytes
 .../rdb/fixtures/zipmap_with_big_values.rdb | Bin 20923 -> 0 bytes
 .../github.com/edsrzf/mmap-go/mmap_test.go | 102 -
 .../github.com/golang/snappy/snappy_test.go | 377 -
 .../src/github.com/peterh/liner/input_test.go | 61 -
 .../src/github.com/peterh/liner/line_test.go | 90 -
 .../github.com/peterh/liner/prefix_test.go | 37 -
 .../src/github.com/peterh/liner/race_test.go | 44 -
 .../src/github.com/peterh/liner/width_test.go | 102 -
 .../siddontang/go/bson/bson_test.go | 1472 ---
 .../siddontang/go/filelock/file_lock_test.go | 77 -
 .../siddontang/go/hack/hack_test.go | 36 -
 .../go/ioutil2/sectionwriter_test.go | 56 -
 .../github.com/siddontang/go/log/log_test.go | 63 -
 .../github.com/siddontang/go/num/num_test.go | 209 -
 .../siddontang/go/snappy/snappy_test.go | 261 -
 .../siddontang/go/sync2/atomic_test.go | 51 -
 .../siddontang/go/sync2/semaphore_test.go | 41 -
 .../siddontang/goredis/goredis_test.go | 39 -
 .../siddontang/goredis/resp_test.go | 81 -
 .../github.com/siddontang/rdb/loader_test.go | 373 -
 .../src/github.com/siddontang/rdb/rdb_test.go | 23 -
 .../syndtr/goleveldb/leveldb/batch_test.go | 120 -
 .../syndtr/goleveldb/leveldb/bench2_test.go | 58 -
 .../syndtr/goleveldb/leveldb/bench_test.go | 464 -
 .../goleveldb/leveldb/cache/bench2_test.go | 30 -
 .../goleveldb/leveldb/cache/cache_test.go | 554 -
 .../syndtr/goleveldb/leveldb/corrupt_test.go | 500 -
 .../syndtr/goleveldb/leveldb/db_test.go | 2701 -----
 .../syndtr/goleveldb/leveldb/external_test.go | 58 -
 .../goleveldb/leveldb/filter/bloom_test.go | 142 -
 .../leveldb/iterator/array_iter_test.go | 30 -
 .../leveldb/iterator/indexed_iter_test.go | 83 -
 .../leveldb/iterator/iter_suite_test.go | 11 -
 .../leveldb/iterator/merged_iter_test.go | 60 -
 .../goleveldb/leveldb/journal/journal_test.go | 818 --
 .../syndtr/goleveldb/leveldb/key.go | 142 -
 .../syndtr/goleveldb/leveldb/key_test.go | 133 -
 .../goleveldb/leveldb/leveldb_suite_test.go | 11 -
 .../goleveldb/leveldb/memdb/bench_test.go | 75 -
 .../leveldb/memdb/memdb_suite_test.go | 11 -
 .../goleveldb/leveldb/memdb/memdb_test.go | 135 -
 .../goleveldb/leveldb/session_record_test.go | 64 -
 .../goleveldb/leveldb/storage/file_storage.go | 543 -
 .../leveldb/storage/file_storage_test.go | 142 -
 .../goleveldb/leveldb/storage/mem_storage.go | 203 -
 .../leveldb/storage/mem_storage_test.go | 66 -
 .../syndtr/goleveldb/leveldb/storage_test.go | 549 -
 .../goleveldb/leveldb/table/block_test.go | 139 -
 .../leveldb/table/table_suite_test.go | 11 -
 .../goleveldb/leveldb/table/table_test.go | 122 -
 .../syndtr/goleveldb/leveldb/testutil/db.go | 222 -
 .../goleveldb/leveldb/testutil/ginkgo.go | 21 -
 .../syndtr/goleveldb/leveldb/testutil/iter.go | 327 -
 .../syndtr/goleveldb/leveldb/testutil/kv.go | 352 -
 .../goleveldb/leveldb/testutil/kvtest.go | 187 -
 .../goleveldb/leveldb/testutil/storage.go | 586 --
 .../syndtr/goleveldb/leveldb/testutil/util.go | 171 -
 .../syndtr/goleveldb/leveldb/testutil_test.go | 63 -
 .../goleveldb/leveldb/util/buffer_test.go | 369 -
 .../syndtr/goleveldb/leveldb/util/pool.go | 21 -
 .../goleveldb/leveldb/util/pool_legacy.go | 33 -
 .../github.com/ugorji/go/codec/cbor_test.go | 205 -
 .../github.com/ugorji/go/codec/codec_test.go | 1117 --
 .../ugorji/go/codec/codecgen/README.md | 36 -
 .../ugorji/go/codec/codecgen/gen.go | 271 -
 .../github.com/ugorji/go/codec/codecgen/z.go | 3 -
 .../ugorji/go/codec/codecgen_test.go | 22 -
 .../github.com/ugorji/go/codec/helper_test.go | 155 -
 .../src/github.com/ugorji/go/codec/py_test.go | 29 -
 .../github.com/ugorji/go/codec/values_test.go | 203 -
 Makefile | 52 +-
 README.md | 18 +-
 bootstrap.sh | 13 -
 cmd/Godeps/Godeps.json | 135 +
 {Godeps => cmd/Godeps}/Readme | 0
 .../github.com/BurntSushi/toml/.gitignore | 0
 .../github.com/BurntSushi/toml/.travis.yml | 0
 .../github.com/BurntSushi/toml/COMPATIBLE | 0
 .../github.com/BurntSushi/toml/COPYING | 0
 .../github.com/BurntSushi/toml/Makefile | 0
 .../github.com/BurntSushi/toml/README.md | 0
 .../github.com/BurntSushi/toml/decode.go | 0
 .../github.com/BurntSushi/toml/decode_meta.go | 0
 .../vendor}/github.com/BurntSushi/toml/doc.go | 0
 .../github.com/BurntSushi/toml/encode.go | 0
 .../BurntSushi/toml/encoding_types.go | 0
 .../BurntSushi/toml/encoding_types_1.1.go | 0
 .../vendor}/github.com/BurntSushi/toml/lex.go | 0
 .../github.com/BurntSushi/toml/parse.go | 0
 .../github.com/BurntSushi/toml/session.vim | 0
 .../github.com/BurntSushi/toml/type_check.go | 0
 .../github.com/BurntSushi/toml/type_fields.go | 0
 .../vendor}/github.com/cupcake/rdb/.gitignore | 0
 .../github.com/cupcake/rdb/.travis.yml | 0
 .../vendor}/github.com/cupcake/rdb/LICENCE | 0
 .../vendor}/github.com/cupcake/rdb/README.md | 0
 .../github.com/cupcake/rdb/crc64/crc64.go | 0
 .../vendor}/github.com/cupcake/rdb/decoder.go | 0
 .../vendor}/github.com/cupcake/rdb/encoder.go | 0
 .../cupcake/rdb/nopdecoder/nop_decoder.go | 0
 .../github.com/cupcake/rdb/slice_buffer.go | 0
 .../github.com/edsrzf/mmap-go/.gitignore | 0
 .../vendor}/github.com/edsrzf/mmap-go/LICENSE | 0
 .../github.com/edsrzf/mmap-go/README.md | 0
 .../vendor}/github.com/edsrzf/mmap-go/mmap.go | 0
 .../github.com/edsrzf/mmap-go/mmap_unix.go | 0
 .../github.com/edsrzf/mmap-go/mmap_windows.go | 14 +-
 .../vendor}/github.com/golang/snappy/AUTHORS | 0
 .../github.com/golang/snappy/CONTRIBUTORS | 0
 .../vendor}/github.com/golang/snappy/LICENSE | 0
 .../vendor}/github.com/golang/snappy/README | 0
 .../github.com/golang/snappy/decode.go | 0
 .../github.com/golang/snappy/encode.go | 0
 .../github.com/golang/snappy/snappy.go | 0
 .../vendor}/github.com/peterh/liner/COPYING | 0
 .../vendor}/github.com/peterh/liner/README.md | 17 +-
 .../github.com/peterh/liner/bsdinput.go | 0
 .../vendor}/github.com/peterh/liner/common.go | 10 +-
 .../github.com/peterh/liner/fallbackinput.go | 0
 .../vendor}/github.com/peterh/liner/input.go | 6 +
 .../github.com/peterh/liner/input_darwin.go | 0
 .../github.com/peterh/liner/input_linux.go | 0
 .../github.com/peterh/liner/input_windows.go | 8 +
 .../vendor}/github.com/peterh/liner/line.go | 191 +-
 .../vendor}/github.com/peterh/liner/output.go | 12 +
 .../github.com/peterh/liner/output_windows.go | 18 +
 .../vendor}/github.com/peterh/liner/signal.go | 0
 .../github.com/peterh/liner/signal_legacy.go | 0
 .../github.com/peterh/liner/unixmode.go | 0
 .../vendor}/github.com/peterh/liner/width.go | 19 +
 .../vendor/github.com/siddontang/go}/LICENSE | 2 +-
 .../github.com/siddontang/go/bson/LICENSE | 0
 .../github.com/siddontang/go/bson/bson.go | 0
 .../github.com/siddontang/go/bson/decode.go | 14 +-
 .../github.com/siddontang/go/bson/encode.go | 20 +-
 .../github.com/siddontang/go/filelock/LICENSE | 0
 .../go/filelock/file_lock_generic.go | 0
 .../go/filelock/file_lock_solaris.go | 0
 .../siddontang/go/filelock/file_lock_unix.go | 0
 .../go/filelock/file_lock_windows.go | 0
 .../github.com/siddontang/go/hack/hack.go | 0
 .../siddontang/go/ioutil2/ioutil.go | 0
 .../siddontang/go/ioutil2/sectionwriter.go | 0
 .../github.com/siddontang/go/log/doc.go | 0
 .../siddontang/go/log/filehandler.go | 4 +-
 .../github.com/siddontang/go/log/handler.go | 3 +-
 .../github.com/siddontang/go/log/log.go | 25 +
 .../siddontang/go/log/sockethandler.go | 4 +-
 .../github.com/siddontang/go/num/bytes.go | 0
 .../github.com/siddontang/go/num/cmp.go | 0
 .../github.com/siddontang/go/num/str.go | 0
 .../github.com/siddontang/go/snappy/LICENSE | 0
 .../github.com/siddontang/go/snappy/decode.go | 0
 .../github.com/siddontang/go/snappy/encode.go | 0
 .../github.com/siddontang/go/snappy/snappy.go | 0
 .../github.com/siddontang/go/sync2/atomic.go | 0
 .../siddontang/go/sync2/semaphore.go | 0
 .../github.com/siddontang/golua}/LICENSE | 0
 .../github.com/siddontang/golua}/c-golua.c | 47 +-
 cmd/vendor/github.com/siddontang/golua/doc.go | 1 +
 .../github.com/siddontang/golua}/golua.go | 58 +-
 .../github.com/siddontang/golua}/golua.h | 8 +-
 .../siddontang/golua}/golua_license | 0
 .../github.com/siddontang/golua}/lauxlib.go | 7 +-
 .../github.com/siddontang/golua}/lua.go | 24 +-
 .../github.com/siddontang/golua}/lua_cjson.c | 2 -
 .../siddontang/golua}/lua_cmsgpack.c | 2 -
 .../github.com/siddontang/golua}/lua_defs.go | 6 +-
 .../github.com/siddontang/golua}/lua_struct.c | 2 -
 .../github.com/siddontang/golua}/strbuf.c | 2 -
 .../github.com/siddontang/golua}/strbuf.h | 2 -
 .../github.com/siddontang/goredis}/LICENSE | 0
 .../github.com/siddontang/goredis/client.go | 0
 .../github.com/siddontang/goredis/conn.go | 0
 .../github.com/siddontang/goredis/doc.go | 0
 .../siddontang/goredis/garyburd_license | 0
 .../github.com/siddontang/goredis/reply.go | 0
 .../github.com/siddontang/goredis/resp.go | 0
 .../vendor}/github.com/siddontang/rdb/LICENSE | 0
 .../github.com/siddontang/rdb/README.md | 0
 .../github.com/siddontang/rdb/decode.go | 0
 .../github.com/siddontang/rdb/digest.go | 0
 .../github.com/siddontang/rdb/encode.go | 0
 .../github.com/siddontang/rdb/loader.go | 0
 .../github.com/siddontang/rdb/reader.go | 0
 .../siddontang/rdb/wandoujia-license | 0
 .../github.com/syndtr/goleveldb/LICENSE | 24 +
 .../syndtr/goleveldb/leveldb/batch.go | 54 +-
 .../syndtr/goleveldb/leveldb/cache/cache.go | 14 +-
 .../syndtr/goleveldb/leveldb/cache/lru.go | 0
 .../syndtr/goleveldb/leveldb/comparer.go | 12 +-
 .../leveldb/comparer/bytes_comparer.go | 0
 .../goleveldb/leveldb/comparer/comparer.go | 0
 .../github.com/syndtr/goleveldb/leveldb/db.go | 322 +-
 .../syndtr/goleveldb/leveldb/db_compaction.go | 215 +-
 .../syndtr/goleveldb/leveldb/db_iter.go | 52 +-
 .../syndtr/goleveldb/leveldb/db_snapshot.go | 8 +-
 .../syndtr/goleveldb/leveldb/db_state.go | 58 +-
 .../goleveldb/leveldb/db_transaction.go | 289 +
 .../syndtr/goleveldb/leveldb/db_util.go | 62 +-
 .../syndtr/goleveldb/leveldb/db_write.go | 48 +-
 .../syndtr/goleveldb/leveldb/doc.go | 0
 .../syndtr/goleveldb/leveldb/errors.go | 0
 .../syndtr/goleveldb/leveldb/errors/errors.go | 20 +-
 .../syndtr/goleveldb/leveldb/filter.go | 4 +-
 .../syndtr/goleveldb/leveldb/filter/bloom.go | 0
 .../syndtr/goleveldb/leveldb/filter/filter.go | 0
 .../goleveldb/leveldb/iterator/array_iter.go | 0
 .../leveldb/iterator/indexed_iter.go | 0
 .../syndtr/goleveldb/leveldb/iterator/iter.go | 0
 .../goleveldb/leveldb/iterator/merged_iter.go | 0
 .../goleveldb/leveldb/journal/journal.go | 3 +-
 .../syndtr/goleveldb/leveldb/key.go | 147 +
 .../syndtr/goleveldb/leveldb/memdb/memdb.go | 0
 .../syndtr/goleveldb/leveldb/opt/options.go | 58 +-
 .../syndtr/goleveldb/leveldb/options.go | 41 +-
 .../syndtr/goleveldb/leveldb/session.go | 64 +-
 .../goleveldb/leveldb/session_compaction.go | 125 +-
 .../goleveldb/leveldb/session_record.go | 92 +-
 .../syndtr/goleveldb/leveldb/session_util.go | 103 +-
 .../goleveldb/leveldb/storage/file_storage.go | 583 +
 .../leveldb/storage/file_storage_plan9.go | 17 +-
 .../leveldb/storage/file_storage_solaris.go | 25 +-
 .../leveldb/storage/file_storage_unix.go | 25 +-
 .../leveldb/storage/file_storage_windows.go | 17 +-
 .../goleveldb/leveldb/storage/mem_storage.go | 218 +
 .../goleveldb/leveldb/storage/storage.go | 148 +-
 .../syndtr/goleveldb/leveldb/table.go | 88 +-
 .../syndtr/goleveldb/leveldb/table/reader.go | 12 +-
 .../syndtr/goleveldb/leveldb/table/table.go | 0
 .../syndtr/goleveldb/leveldb/table/writer.go | 0
 .../syndtr/goleveldb/leveldb/util.go | 14 +-
 .../syndtr/goleveldb/leveldb/util/buffer.go | 0
 .../goleveldb/leveldb/util/buffer_pool.go | 0
 .../syndtr/goleveldb/leveldb/util/crc32.go | 0
 .../syndtr/goleveldb/leveldb/util/hash.go | 28 +-
 .../syndtr/goleveldb/leveldb/util/range.go | 0
 .../syndtr/goleveldb/leveldb/util/util.go | 0
 .../syndtr/goleveldb/leveldb/version.go | 308 +-
 cmd/vendor/github.com/ugorji/go/LICENSE | 22 +
 .../github.com/ugorji/go/codec/0doc.go | 0
 .../github.com/ugorji/go/codec/README.md | 0
 .../github.com/ugorji/go/codec/binc.go | 0
 .../github.com/ugorji/go/codec/cbor.go | 0
 .../github.com/ugorji/go/codec/decode.go | 0
 .../github.com/ugorji/go/codec/encode.go | 0
 .../ugorji/go/codec/fast-path.generated.go | 0
 .../ugorji/go/codec/fast-path.go.tmpl | 0
 .../ugorji/go/codec/gen-dec-array.go.tmpl | 0
 .../ugorji/go/codec/gen-dec-map.go.tmpl | 0
 .../ugorji/go/codec/gen-helper.generated.go | 0
 .../ugorji/go/codec/gen-helper.go.tmpl | 0
 .../ugorji/go/codec/gen.generated.go | 0
 .../vendor}/github.com/ugorji/go/codec/gen.go | 0
 .../github.com/ugorji/go/codec/helper.go | 0
 .../ugorji/go/codec/helper_internal.go | 0
 .../ugorji/go/codec/helper_not_unsafe.go | 0
 .../ugorji/go/codec/helper_unsafe.go | 0
 .../github.com/ugorji/go/codec/json.go | 0
 .../github.com/ugorji/go/codec/msgpack.go | 0
 .../github.com/ugorji/go/codec/noop.go | 0
 .../github.com/ugorji/go/codec/prebuild.go | 0
 .../github.com/ugorji/go/codec/prebuild.sh | 0
 .../vendor}/github.com/ugorji/go/codec/rpc.go | 0
 .../github.com/ugorji/go/codec/simple.go | 0
 .../ugorji/go/codec/test-cbor-goldens.json | 0
 .../github.com/ugorji/go/codec/test.py | 0
 .../github.com/ugorji/go/codec/time.go | 0
 config/config.toml | 2 -
 etc/ledis.conf | 2 -
 server/client_http.go | 20 +-
 server/cmd_script.go | 4 +-
 server/cmd_sort_test.go | 2 +-
 server/script.go | 4 +-
 server/script_test.go | 2 +-
 store/boltdb/const.go | 3 -
 store/boltdb/db.go | 173 -
 store/boltdb/iterator.go | 50 -
 store/boltdb/snapshot.go | 37 -
 store/boltdb/tx.go | 61 -
 store/driver/store.go | 2 +-
 store/mdb/const.go | 3 -
 store/mdb/influxdb_license | 20 -
 store/mdb/mdb.go | 317 -
 store/mdb/snapshot.go | 43 -
 store/mdb/tx.go | 90 -
 store/store.go | 2 -
 vendor/README.md | 3 -
 vendor/gomdb/LICENSE | 10 -
 vendor/gomdb/README.md | 28 -
 vendor/gomdb/bench_test.go | 334 -
 vendor/gomdb/cursor.go | 105 -
 vendor/gomdb/env.go | 229 -
 vendor/gomdb/env_test.go | 81 -
 vendor/gomdb/error_test.go | 23 -
 vendor/gomdb/example_test.go | 70 -
 vendor/gomdb/lmdb.h | 1555 ---
 vendor/gomdb/mdb.c | 9366 -----------------
 vendor/gomdb/mdb.go | 15 -
 vendor/gomdb/mdb_test.go | 115 -
 vendor/gomdb/midl.c | 362 -
 vendor/gomdb/midl.h | 188 -
 vendor/gomdb/txn.go | 199 -
 vendor/gomdb/val.go | 54 -
 vendor/gomdb/val_test.go | 37 -
 vendor/lua/lua_test.go | 386 -
 380 files changed, 3016 insertions(+), 44172 deletions(-)
 delete mode 100644 Godeps/Godeps.json
 delete mode 100644 Godeps/_workspace/.gitignore
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/Makefile
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/README.md
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/db.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/doc.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/errors.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/node.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/page.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/tx.go
 delete mode 100644 Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/decoder_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/encoder_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/examples/diff.go
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/dictionary.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/easily_compressible_string_key.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/empty_database.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/hash_as_ziplist.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/integer_keys.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_16.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_32.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_64.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/keys_with_expiry.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/linkedlist.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/multiple_databases.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/rdb_version_5_with_checksum.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/regular_set.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/regular_sorted_set.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/sorted_set_as_ziplist.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/uncompressible_string_keys.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_that_compresses_easily.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_that_doesnt_compress.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_with_integers.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_that_compresses_easily.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_that_doesnt_compress.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_with_big_values.rdb
 delete mode 100644 Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/peterh/liner/input_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/peterh/liner/line_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/peterh/liner/prefix_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/peterh/liner/race_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/peterh/liner/width_test.go
Godeps/_workspace/src/github.com/peterh/liner/width_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/bson/bson_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/hack/hack_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/ioutil2/sectionwriter_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/log/log_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/num/num_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/snappy/snappy_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/sync2/atomic_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/go/sync2/semaphore_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/goredis/goredis_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/goredis/resp_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/rdb/loader_test.go delete mode 100644 Godeps/_workspace/src/github.com/siddontang/rdb/rdb_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go delete mode 100644 
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
 delete mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go
 delete mode 100644 Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go
 delete mode 100755 bootstrap.sh
 create mode 100644 cmd/Godeps/Godeps.json
 rename {Godeps => cmd/Godeps}/Readme (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/.gitignore (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/.travis.yml (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/COMPATIBLE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/COPYING (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/Makefile (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/README.md (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/decode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/decode_meta.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/doc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/encode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/encoding_types.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/encoding_types_1.1.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/lex.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/parse.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/session.vim (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/type_check.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/BurntSushi/toml/type_fields.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/.gitignore (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/.travis.yml (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/LICENCE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/README.md (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/crc64/crc64.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/decoder.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/encoder.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/nopdecoder/nop_decoder.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/cupcake/rdb/slice_buffer.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/edsrzf/mmap-go/.gitignore (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/edsrzf/mmap-go/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/edsrzf/mmap-go/README.md (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/edsrzf/mmap-go/mmap.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/edsrzf/mmap-go/mmap_unix.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/edsrzf/mmap-go/mmap_windows.go (78%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/AUTHORS (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/CONTRIBUTORS (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/README (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/decode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/encode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/golang/snappy/snappy.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/COPYING (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/README.md (87%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/bsdinput.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/common.go (95%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/fallbackinput.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/input.go (97%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/input_darwin.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/input_linux.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/input_windows.go (95%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/line.go (81%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/output.go (86%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/output_windows.go (69%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/signal.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/signal_legacy.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/unixmode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/peterh/liner/width.go (75%)
 rename {Godeps/_workspace/src/github.com/boltdb/bolt => cmd/vendor/github.com/siddontang/go}/LICENSE (97%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/bson/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/bson/bson.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/bson/decode.go (99%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/bson/encode.go (97%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/filelock/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/filelock/file_lock_generic.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/filelock/file_lock_solaris.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/filelock/file_lock_unix.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/filelock/file_lock_windows.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/hack/hack.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/ioutil2/ioutil.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/ioutil2/sectionwriter.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/log/doc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/log/filehandler.go (98%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/log/handler.go (93%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/log/log.go (91%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/log/sockethandler.go (95%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/num/bytes.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/num/cmp.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/num/str.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/snappy/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/snappy/decode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/snappy/encode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/snappy/snappy.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/sync2/atomic.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/go/sync2/semaphore.go (100%)
 rename {Godeps/_workspace/src/github.com/siddontang/goredis => cmd/vendor/github.com/siddontang/golua}/LICENSE (100%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/c-golua.c (90%)
 create mode 100644 cmd/vendor/github.com/siddontang/golua/doc.go
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/golua.go (77%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/golua.h (88%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/golua_license (100%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/lauxlib.go (96%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/lua.go (95%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/lua_cjson.c (99%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/lua_cmsgpack.c (99%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/lua_defs.go (97%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/lua_struct.c (99%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/strbuf.c (99%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/golua}/strbuf.h (99%)
 rename {vendor/lua => cmd/vendor/github.com/siddontang/goredis}/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/goredis/client.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/goredis/conn.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/goredis/doc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/goredis/garyburd_license (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/goredis/reply.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/goredis/resp.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/LICENSE (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/README.md (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/decode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/digest.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/encode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/loader.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/reader.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/siddontang/rdb/wandoujia-license (100%)
 create mode 100644 cmd/vendor/github.com/syndtr/goleveldb/LICENSE
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/batch.go (80%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/cache/cache.go (95%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/cache/lru.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/comparer.go (83%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db.go (78%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db_compaction.go (78%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db_iter.go (81%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db_snapshot.go (94%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db_state.go (79%)
 create mode 100644 cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db_util.go (57%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/db_write.go (85%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/doc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/errors.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/errors/errors.go (79%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/filter.go (84%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/filter/bloom.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/filter/filter.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/iterator/iter.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/journal/journal.go (99%)
 create mode 100644 cmd/vendor/github.com/syndtr/goleveldb/leveldb/key.go
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/opt/options.go (94%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/options.go (67%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/session.go (67%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/session_compaction.go (63%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/session_record.go (72%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/session_util.go (63%)
 create mode 100644 cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go (71%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go (63%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go (68%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go (69%)
 create mode 100644 cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/storage/storage.go (53%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/table.go (84%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/table/reader.go (98%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/table/table.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/table/writer.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util.go (83%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util/buffer.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util/crc32.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util/hash.go (60%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util/range.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/util/util.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/syndtr/goleveldb/leveldb/version.go (52%)
 create mode 100644 cmd/vendor/github.com/ugorji/go/LICENSE
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/0doc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/README.md (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/binc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/cbor.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/decode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/encode.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/fast-path.generated.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/fast-path.go.tmpl (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/gen-dec-array.go.tmpl (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/gen-dec-map.go.tmpl (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/gen-helper.generated.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/gen-helper.go.tmpl (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/gen.generated.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/gen.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/helper.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/helper_internal.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/helper_not_unsafe.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/helper_unsafe.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/json.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/msgpack.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/noop.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/prebuild.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/prebuild.sh (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/rpc.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/simple.go (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/test-cbor-goldens.json (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/test.py (100%)
 rename {Godeps/_workspace/src => cmd/vendor}/github.com/ugorji/go/codec/time.go (100%)
 delete mode 100644 store/boltdb/const.go
 delete mode 100644 store/boltdb/db.go
 delete mode 100644 store/boltdb/iterator.go
 delete mode 100644 store/boltdb/snapshot.go
 delete mode 100644 store/boltdb/tx.go
 delete mode 100644 store/mdb/const.go
 delete mode 100644 store/mdb/influxdb_license
 delete mode 100644 store/mdb/mdb.go
 delete mode 100644 store/mdb/snapshot.go
 delete mode 100644 store/mdb/tx.go
 delete mode 100644 vendor/README.md
 delete mode 100644 vendor/gomdb/LICENSE
 delete mode 100644 vendor/gomdb/README.md
 delete mode 100644 vendor/gomdb/bench_test.go
 delete mode 100644 vendor/gomdb/cursor.go
 delete mode 100644 vendor/gomdb/env.go
 delete mode 100644 vendor/gomdb/env_test.go
 delete mode 100644 vendor/gomdb/error_test.go
 delete mode 100644 vendor/gomdb/example_test.go
 delete mode 100644 vendor/gomdb/lmdb.h
 delete mode 100644 vendor/gomdb/mdb.c
 delete mode 100644 vendor/gomdb/mdb.go
 delete mode 100644 vendor/gomdb/mdb_test.go
 delete mode 100644 vendor/gomdb/midl.c
 delete mode 100644 vendor/gomdb/midl.h
 delete mode 100644 vendor/gomdb/txn.go
 delete mode 100644 vendor/gomdb/val.go
 delete mode 100644 vendor/gomdb/val_test.go
 delete mode 100644 vendor/lua/lua_test.go

diff --git a/.travis.yml b/.travis.yml
index 52e07e4..20084aa 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,7 @@
 language: go
-go: 1.3.3
-before_install:
-  - go get github.com/tools/godep
-  - go get code.google.com/p/go.tools/cmd/cover
-  - go install -race std
+go:
+  - 1.5
+  - 1.6
+
 script:
-  - godep go test -cover ./...
-  - godep go test -race ./...
+  - make travis
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
deleted file mode 100644
index 81460c7..0000000
--- a/Godeps/Godeps.json
+++ /dev/null
@@ -1,83 +0,0 @@
-{
-	"ImportPath": "github.com/siddontang/ledisdb",
-	"GoVersion": "go1.5beta1",
-	"Packages": [
-		"./..."
-	],
-	"Deps": [
-		{
-			"ImportPath": "github.com/BurntSushi/toml",
-			"Comment": "v0.1.0-21-g056c9bc",
-			"Rev": "056c9bc7be7190eaa7715723883caffa5f8fa3e4"
-		},
-		{
-			"ImportPath": "github.com/boltdb/bolt",
-			"Comment": "v1.0-115-gc2745b3",
-			"Rev": "c2745b3c62985affcf08d0522135f4747e9b81f3"
-		},
-		{
-			"ImportPath": "github.com/cupcake/rdb",
-			"Rev": "3454dcabd33cb8ea8261ffd6a45f4d836eb504cc"
-		},
-		{
-			"ImportPath": "github.com/edsrzf/mmap-go",
-			"Rev": "6c75090c55983bef2e129e173681b20d24871ef8"
-		},
-		{
-			"ImportPath": "github.com/golang/snappy",
-			"Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
-		},
-		{
-			"ImportPath": "github.com/peterh/liner",
-			"Rev": "1bb0d1c1a25ed393d8feb09bab039b2b1b1fbced"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/bson",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/filelock",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/hack",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/ioutil2",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/log",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/num",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/snappy",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/go/sync2",
-			"Rev": "530a23162549a31baa14dfa3b647a9eccee8878f"
-		},
-		{
-			"ImportPath": "github.com/siddontang/goredis",
-			"Rev": "760763f78400635ed7b9b115511b8ed06035e908"
-		},
-		{
-			"ImportPath": "github.com/siddontang/rdb",
-			"Rev": "fc89ed2e418d27e3ea76e708e54276d2b44ae9cf"
-		},
-		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
-		},
-		{
-			"ImportPath": "github.com/ugorji/go/codec",
-			"Rev": "5abd4e96a45c386928ed2ca2a7ef63e2533e18ec"
-		}
-	]
-}
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
deleted file mode 100644
index f037d68..0000000
--- a/Godeps/_workspace/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/pkg
-/bin
diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
deleted file mode 100644
index 24421eb..0000000
--- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Implements the TOML test suite interface
-
-This is an implementation of the interface expected by
-[toml-test](https://github.com/BurntSushi/toml-test) for my
-[toml parser written in Go](https://github.com/BurntSushi/toml).
-In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
-
-
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
-Compatible with `toml-test` version
-[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
-
diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
deleted file mode 100644
index 14e7557..0000000
--- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Command toml-test-decoder satisfies the toml-test interface for testing
-// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
-package main
-
-import (
-	"encoding/json"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"path"
-	"time"
-
-	"github.com/BurntSushi/toml"
-)
-
-func init() {
-	log.SetFlags(0)
-
-	flag.Usage = usage
-	flag.Parse()
-}
-
-func usage() {
-	log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
-	flag.PrintDefaults()
-
-	os.Exit(1)
-}
-
-func main() {
-	if flag.NArg() != 0 {
-		flag.Usage()
-	}
-
-	var tmp interface{}
-	if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
-		log.Fatalf("Error decoding TOML: %s", err)
-	}
-
-	typedTmp := translate(tmp)
-	if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
-		log.Fatalf("Error encoding JSON: %s", err)
-	}
-}
-
-func translate(tomlData interface{}) interface{} {
-	switch orig := tomlData.(type) {
-	case map[string]interface{}:
-		typed := make(map[string]interface{}, len(orig))
-		for k, v := range orig {
-			typed[k] = translate(v)
-		}
-		return typed
-	case []map[string]interface{}:
-		typed := make([]map[string]interface{}, len(orig))
-		for i, v := range orig {
-			typed[i] = translate(v).(map[string]interface{})
-		}
-		return typed
-	case []interface{}:
-		typed := make([]interface{}, len(orig))
-		for i, v := range orig {
-			typed[i] = translate(v)
-		}
-
-		// We don't really need to tag arrays, but let's be future proof.
-		// (If TOML ever supports tuples, we'll need this.)
-		return tag("array", typed)
-	case time.Time:
-		return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
-	case bool:
-		return tag("bool", fmt.Sprintf("%v", orig))
-	case int64:
-		return tag("integer", fmt.Sprintf("%d", orig))
-	case float64:
-		return tag("float", fmt.Sprintf("%v", orig))
-	case string:
-		return tag("string", orig)
-	}
-
-	panic(fmt.Sprintf("Unknown type: %T", tomlData))
-}
-
-func tag(typeName string, data interface{}) map[string]interface{} {
-	return map[string]interface{}{
-		"type":  typeName,
-		"value": data,
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
deleted file mode 100644
index 45a603f..0000000
--- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Implements the TOML test suite interface for TOML encoders
-
-This is an implementation of the interface expected by
-[toml-test](https://github.com/BurntSushi/toml-test) for the
-[TOML encoder](https://github.com/BurntSushi/toml).
-In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
-
-
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
-Compatible with `toml-test` version
-[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
-
diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
deleted file mode 100644
index 092cc68..0000000
--- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Command toml-test-encoder satisfies the toml-test interface for testing
-// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
-package main - -import ( - "encoding/json" - "flag" - "log" - "os" - "path" - "strconv" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil { - log.Fatalf("Error decoding JSON: %s", err) - } - - tomlData := translate(tmp) - if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil { - log.Fatalf("Error encoding TOML: %s", err) - } -} - -func translate(typedJson interface{}) interface{} { - switch v := typedJson.(type) { - case map[string]interface{}: - if len(v) == 2 && in("type", v) && in("value", v) { - return untag(v) - } - m := make(map[string]interface{}, len(v)) - for k, v2 := range v { - m[k] = translate(v2) - } - return m - case []interface{}: - tabArray := make([]map[string]interface{}, len(v)) - for i := range v { - if m, ok := translate(v[i]).(map[string]interface{}); ok { - tabArray[i] = m - } else { - log.Fatalf("JSON arrays may only contain objects. This " + - "corresponds to only tables being allowed in " + - "TOML table arrays.") - } - } - return tabArray - } - log.Fatalf("Unrecognized JSON format '%T'.", typedJson) - panic("unreachable") -} - -func untag(typed map[string]interface{}) interface{} { - t := typed["type"].(string) - v := typed["value"] - switch t { - case "string": - return v.(string) - case "integer": - v := v.(string) - n, err := strconv.Atoi(v) - if err != nil { - log.Fatalf("Could not parse '%s' as integer: %s", v, err) - } - return n - case "float": - v := v.(string) - f, err := strconv.ParseFloat(v, 64) - if err != nil { - log.Fatalf("Could not parse '%s' as float64: %s", v, err) - } - return f - case "datetime": - v := v.(string) - t, err := time.Parse("2006-01-02T15:04:05Z", v) - if err != nil { - log.Fatalf("Could not parse '%s' as a datetime: %s", v, err) - } - return t - case "bool": - v := v.(string) - switch v { - case "true": - return true - case "false": - return false - } - log.Fatalf("Could not parse '%s' as a boolean.", v) - case "array": - v := v.([]interface{}) - array := make([]interface{}, len(v)) - for i := range v { - if m, ok := v[i].(map[string]interface{}); ok { - array[i] = untag(m) - } else { - log.Fatalf("Arrays may only contain other arrays or "+ - "primitive values, but found a '%T'.", m) - } - } - return array - } - log.Fatalf("Unrecognized tag type '%s'.", t) - panic("unreachable") -} - -func in(key string, m map[string]interface{}) bool { - _, ok := m[key] - return ok -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
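As context for the toml-test-decoder and toml-test-encoder commands above: toml-test represents every TOML value as a tagged `{"type": ..., "value": ...}` JSON object whose value is carried as a string; the decoder's `tag()` emits this shape and the encoder's `untag()` re-parses it. A minimal runnable sketch of that convention (the key name `best` is purely illustrative, not from the patch):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The TOML document `best = 42` travels through toml-test as tagged
	// JSON; numeric values are carried as strings and re-parsed on the
	// other side (compare untag() in the deleted encoder above).
	tagged := map[string]interface{}{
		"best": map[string]interface{}{
			"type":  "integer",
			"value": "42",
		},
	}
	out, err := json.Marshal(tagged)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"best":{"type":"integer","value":"42"}}
}
```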
- diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md deleted file mode 100644 index 5df0dc3..0000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# TOML Validator - -If Go is installed, it's simple to try it out: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -You can see the types of every key in a TOML file with: - -```bash -tomlv -types some-toml-file.toml -``` - -At the moment, only one error message is reported at a time. Error messages -include line numbers. No output means that the files given are valid TOML, or -there is a bug in `tomlv`. - -Compatible with TOML version -[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md) - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go deleted file mode 100644 index c7d689a..0000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go +++ /dev/null @@ -1,61 +0,0 @@ -// Command tomlv validates TOML documents and prints each key's type. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/BurntSushi/toml" -) - -var ( - flagTypes = false -) - -func init() { - log.SetFlags(0) - - flag.BoolVar(&flagTypes, "types", flagTypes, - "When set, the types of every defined key will be shown.") - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s toml-file [ toml-file ... ]\n", - path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() < 1 { - flag.Usage() - } - for _, f := range flag.Args() { - var tmp interface{} - md, err := toml.DecodeFile(f, &tmp) - if err != nil { - log.Fatalf("Error in '%s': %s", f, err) - } - if flagTypes { - printTypes(md) - } - } -} - -func printTypes(md toml.MetaData) { - tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - for _, key := range md.Keys() { - fmt.Fprintf(tabw, "%s%s\t%s\n", - strings.Repeat(" ", len(key)-1), key, md.Type(key...)) - } - tabw.Flush() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index 3805931..0000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,950 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "reflect" - "testing" - "time" -) - -func init() { - log.SetFlags(0) -} - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", 
"yellow", "black"}, - }, - My: map[string]cats{ - "Cats": cats{Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - - tests := map[string]struct { - input string - decodeInto interface{} - wantDecoded interface{} - }{ - "embedded struct": { - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - "embedded non-nil pointer to struct": { - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - "embedded nil pointer to struct": { - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - "embedded int": { - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } - - for label, test := range tests { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) - } - } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. 
-func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func TestUnmarshaler(t *testing.T) { - - var tomlBlob = ` -[dishes.hamboogie] -name = "Hamboogie with fries" -price = 10.99 - -[[dishes.hamboogie.ingredients]] -name = "Bread Bun" - -[[dishes.hamboogie.ingredients]] -name = "Lettuce" - -[[dishes.hamboogie.ingredients]] -name = "Real Beef Patty" - -[[dishes.hamboogie.ingredients]] -name = "Tomato" - -[dishes.eggsalad] -name = "Egg Salad with rice" -price = 3.99 - -[[dishes.eggsalad.ingredients]] -name = "Egg" - -[[dishes.eggsalad.ingredients]] -name = "Mayo" - -[[dishes.eggsalad.ingredients]] -name = "Rice" -` - m := &menu{} - if _, err := Decode(tomlBlob, m); err != nil { - log.Fatal(err) - } - - if len(m.Dishes) != 2 { - t.Log("two dishes should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 2, len(m.Dishes)) - } - - eggSalad := m.Dishes["eggsalad"] - if _, ok := interface{}(eggSalad).(dish); !ok { - t.Errorf("expected a dish") - } - - if eggSalad.Name != "Egg Salad with rice" { - t.Errorf("expected the dish to be named 'Egg Salad with rice'") - } - - if len(eggSalad.Ingredients) != 3 { - t.Log("dish should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients)) - } - - found := false - for _, i := range eggSalad.Ingredients { - if i.Name == "Rice" { - found = true - break - } - } - if !found { - t.Error("Rice was not loaded in UnmarshalTOML()") - } - - // test on a value - must be passed as * - o := menu{} - if _, err := Decode(tomlBlob, &o); err != nil { - log.Fatal(err) - } - -} - -type menu struct { - Dishes map[string]dish -} - -func (m *menu) UnmarshalTOML(p interface{}) error { - m.Dishes = make(map[string]dish) - data, _ := p.(map[string]interface{}) - dishes := data["dishes"].(map[string]interface{}) - for n, v := range dishes { - if d, ok := v.(map[string]interface{}); ok { - nd := dish{} - nd.UnmarshalTOML(d) - m.Dishes[n] = nd - } else { - return fmt.Errorf("not a dish") - } - } - return nil -} - -type dish struct { - Name string - Price float32 - Ingredients []ingredient -} - -func (d *dish) UnmarshalTOML(p interface{}) error { - data, _ := p.(map[string]interface{}) - d.Name, 
_ = data["name"].(string) - d.Price, _ = data["price"].(float32) - ingredients, _ := data["ingredients"].([]map[string]interface{}) - for _, e := range ingredients { - n, _ := interface{}(e).(map[string]interface{}) - name, _ := n["name"].(string) - i := ingredient{name} - d.Ingredients = append(d.Ingredients, i) - } - return nil -} - -type ingredient struct { - Name string -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands."J Geils"] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. -[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. 
-func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. -func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} - -// Example UnmarshalTOML shows how to implement a struct type that knows how to -// unmarshal itself. The struct must take full responsibility for mapping the -// values passed into the struct. The method may be used with interfaces in a -// struct in cases where the actual type is not known until the data is -// examined. -func Example_unmarshalTOML() { - - var blob = ` -[[parts]] -type = "valve" -id = "valve-1" -size = 1.2 -rating = 4 - -[[parts]] -type = "valve" -id = "valve-2" -size = 2.1 -rating = 5 - -[[parts]] -type = "pipe" -id = "pipe-1" -length = 2.1 -diameter = 12 - -[[parts]] -type = "cable" -id = "cable-1" -length = 12 -rating = 3.1 -` - o := &order{} - err := Unmarshal([]byte(blob), o) - if err != nil { - log.Fatal(err) - } - - fmt.Println(len(o.parts)) - - for _, part := range o.parts { - fmt.Println(part.Name()) - } - - // Code to implement UnmarshalTOML. - - // type order struct { - // // NOTE `order.parts` is a private slice of type `part` which is an - // // interface and may only be loaded from toml using the - // // UnmarshalTOML() method of the Unmarshaler interface. - // parts parts - // } - - // func (o *order) UnmarshalTOML(data interface{}) error { - - // // NOTE the example below contains detailed type casting to show how - // // the 'data' is retrieved. In operational use, a type cast wrapper - // // may be preferred e.g.
- // // - // // func AsMap(v interface{}) (map[string]interface{}, error) { - // // return v.(map[string]interface{}) - // // } - // // - // // resulting in: - // // d, _ := AsMap(data) - // // - - // d, _ := data.(map[string]interface{}) - // parts, _ := d["parts"].([]map[string]interface{}) - - // for _, p := range parts { - - // typ, _ := p["type"].(string) - // id, _ := p["id"].(string) - - // // detect the type of part and handle each case - // switch p["type"] { - // case "valve": - - // size := float32(p["size"].(float64)) - // rating := int(p["rating"].(int64)) - - // valve := &valve{ - // Type: typ, - // ID: id, - // Size: size, - // Rating: rating, - // } - - // o.parts = append(o.parts, valve) - - // case "pipe": - - // length := float32(p["length"].(float64)) - // diameter := int(p["diameter"].(int64)) - - // pipe := &pipe{ - // Type: typ, - // ID: id, - // Length: length, - // Diameter: diameter, - // } - - // o.parts = append(o.parts, pipe) - - // case "cable": - - // length := int(p["length"].(int64)) - // rating := float32(p["rating"].(float64)) - - // cable := &cable{ - // Type: typ, - // ID: id, - // Length: length, - // Rating: rating, - // } - - // o.parts = append(o.parts, cable) - - // } - // } - - // return nil - // } - - // type parts []part - - // type part interface { - // Name() string - // } - - // type valve struct { - // Type string - // ID string - // Size float32 - // Rating int - // } - - // func (v *valve) Name() string { - // return fmt.Sprintf("VALVE: %s", v.ID) - // } - - // type pipe struct { - // Type string - // ID string - // Length float32 - // Diameter int - // } - - // func (p *pipe) Name() string { - // return fmt.Sprintf("PIPE: %s", p.ID) - // } - - // type cable struct { - // Type string - // ID string - // Length int - // Rating float32 - // } - - // func (c *cable) Name() string { - // return fmt.Sprintf("CABLE: %s", c.ID) - // } - - // Output: - // 4 - // VALVE: valve-1 - // VALVE: valve-2 - // PIPE: pipe-1 - // CABLE: cable-1 - -} - -type order struct { - // NOTE `order.parts` is a private slice of type `part` which is an - // interface and may only be loaded from toml using the UnmarshalTOML() - // method of the Umarshaler interface. - parts parts -} - -func (o *order) UnmarshalTOML(data interface{}) error { - - // NOTE the example below contains detailed type casting to show how - // the 'data' is retrieved. In operational use, a type cast wrapper - // may be prefered e.g. 
- // - // func AsMap(v interface{}) (map[string]interface{}, error) { - // return v.(map[string]interface{}) - // } - // - // resulting in: - // d, _ := AsMap(data) - // - - d, _ := data.(map[string]interface{}) - parts, _ := d["parts"].([]map[string]interface{}) - - for _, p := range parts { - - typ, _ := p["type"].(string) - id, _ := p["id"].(string) - - // detect the type of part and handle each case - switch p["type"] { - case "valve": - - size := float32(p["size"].(float64)) - rating := int(p["rating"].(int64)) - - valve := &valve{ - Type: typ, - ID: id, - Size: size, - Rating: rating, - } - - o.parts = append(o.parts, valve) - - case "pipe": - - length := float32(p["length"].(float64)) - diameter := int(p["diameter"].(int64)) - - pipe := &pipe{ - Type: typ, - ID: id, - Length: length, - Diameter: diameter, - } - - o.parts = append(o.parts, pipe) - - case "cable": - - length := int(p["length"].(int64)) - rating := float32(p["rating"].(float64)) - - cable := &cable{ - Type: typ, - ID: id, - Length: length, - Rating: rating, - } - - o.parts = append(o.parts, cable) - - } - } - - return nil -} - -type parts []part - -type part interface { - Name() string -} - -type valve struct { - Type string - ID string - Size float32 - Rating int -} - -func (v *valve) Name() string { - return fmt.Sprintf("VALVE: %s", v.ID) -} - -type pipe struct { - Type string - ID string - Length float32 - Diameter int -} - -func (p *pipe) Name() string { - return fmt.Sprintf("PIPE: %s", p.ID) -} - -type cable struct { - Type string - ID string - Length int - Rating float32 -} - -func (c *cable) Name() string { - return fmt.Sprintf("CABLE: %s", c.ID) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 445ca8e..0000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,542 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - log.Printf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. 
-func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. - input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": 
{ - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - 
[]*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - map[string]int{ - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) anonymous non-struct": { - input: struct{ NonStruct }{5}, - wantError: errAnonNonStruct, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func TestEncodeWithOmitEmpty(t *testing.T) { - type simple struct { - User string `toml:"user"` - Pass string `toml:"password,omitempty"` - } - - value := simple{"Testing", ""} - expected := fmt.Sprintf("user = %q\n", value.User) - encodeExpected(t, "simple with omitempty, is empty", value, expected, nil) - value.Pass = "some password" - expected = fmt.Sprintf("user = %q\npassword = %q\n", value.User, value.Pass) - encodeExpected(t, "simple with omitempty, not empty", value, expected, nil) -} - -func TestEncodeWithOmitZero(t *testing.T) { - type simple struct { - Number int `toml:"number,omitzero"` - Real float64 `toml:"real,omitzero"` - Unsigned uint `toml:"unsigned,omitzero"` - } - - value := simple{0, 0.0, uint(0)} - expected := "" - - encodeExpected(t, "simple with omitzero, all zero", value, expected, nil) - - value.Number = 10 - value.Real = 20 - value.Unsigned = 5 - expected = `number = 10 -real = 20.0 -unsigned = 5 -` - encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := 
enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore b/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile deleted file mode 100644 index cfbed51..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,54 +0,0 @@ -TEST=. -BENCH=. -COVERPROFILE=/tmp/c.out -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -bench: - go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) - -# http://cloc.sourceforge.net/ -cloc: - @cloc --not-match-f='Makefile|_test.go' . - -cover: fmt - go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . - go tool cover -html=$(COVERPROFILE) - rm $(COVERPROFILE) - -cpuprofile: fmt - @go test -c - @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof - -# go get github.com/kisielk/errcheck -errcheck: - @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt - -fmt: - @go fmt ./... - -get: - @go get -d ./... 
- -build: get - @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt - -test: fmt - @go get github.com/stretchr/testify/assert - @echo "=== TESTS ===" - @go test -v -cover -test.run=$(TEST) - @echo "" - @echo "" - @echo "=== CLI ===" - @go test -v -test.run=$(TEST) ./cmd/bolt - @echo "" - @echo "" - @echo "=== RACE DETECTOR ===" - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -.PHONY: bench cloc cover cpuprofile fmt memprofile test diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md deleted file mode 100644 index 00fad6a..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,621 +0,0 @@ -Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - - -## Project Status - -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. - - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and provides a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started.
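As a rough sketch of that model (assuming `db` is an open `*bolt.DB` and `sync` is imported; this example is not taken from the README itself), read-only transactions may run concurrently while read-write transactions are serialized:

```go
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Any number of read-only transactions can be open at once.
		_ = db.View(func(tx *bolt.Tx) error { return nil })
	}()
}
// Only one read-write transaction runs at a time; others wait for it.
_ = db.Update(func(tx *bolt.Tx) error { return nil })
wg.Wait()
```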
- -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating a transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. - - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also roll back the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for the disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Printf("Allocated ID %d\n", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but _please_ be sure to close the -transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction...
-_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can set a zero-length value -on a key, which is different from the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -When you have iterated to the end of the cursor then `Next()` will return `nil`. -You must seek to a position using `First()`, `Last()`, or `Seek()` before -calling `Next()` or `Prev()`. If you do not seek to a position then these -functions will return `nil`.
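To iterate in reverse byte order, `Last()` and `Prev()` can be combined the same way (a sketch in the style of the examples above, not part of the original README):

```go
db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	// Start at the last key and step backward until Prev() returns nil.
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```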
- - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - - -### Database backups - -Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. It will also use `O_DIRECT` when available -to prevent page cache thrashing. - -One common use case is to back up over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can back up using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to back up to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR.
- json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. - - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. To do this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. - -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application, however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write-ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/value pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use.
For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read-intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can add a write-ahead log or - [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt - to mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long-running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it; see the copy sketch - after this list. - -* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets cause poor page utilization - once they become larger than the page size (typically 4KB). - -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian-specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation].
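As referenced in the byte-slice caveat above, a minimal sketch (in the style of the examples above, not part of the original README) of copying a value so it outlives its transaction:

```go
var answer []byte
err := db.View(func(tx *bolt.Tx) error {
	// Assumes the "MyBucket"/"answer" pair from the earlier examples exists.
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	if v != nil {
		answer = make([]byte, len(v))
		copy(answer, v) // v may be unmapped after View returns; answer is safe
	}
	return nil
})
```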
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as an optional storage engine, testing it against Basho-tuned LevelDB.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed.
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high-performance NoSQL database, using Bolt as optional storage.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast IP geolocation server using Bolt with bloom filters.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as an optional backend.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a Linux server.
-* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
-* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, provides a JSON-over-HTTP API, uses ISO 8601 duration notation, and supports dependent jobs.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-
-If you are using Bolt in a project please send a pull request to add it to the list.
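-
-Finally, here is a minimal sketch of the HTTP stats endpoint suggested in the
-monitoring section above. It serves a point-in-time snapshot rather than a
-fixed-length sample; the handler name and route are illustrative assumptions,
-not part of Bolt's API, and it assumes `encoding/json` and `net/http` are
-imported.
-
-```go
-// statsHandler returns an HTTP handler that encodes db.Stats() as JSON.
-func statsHandler(db *bolt.DB) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Content-Type", "application/json")
-		if err := json.NewEncoder(w).Encode(db.Stats()); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-		}
-	}
-}
-
-// Example wiring: http.Handle("/stats", statsHandler(db))
-```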
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go
deleted file mode 100644
index 84acae6..0000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package bolt
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-)
-
-// Batch calls fn as part of a batch. It behaves similarly to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns an error.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in
-// the caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
-	errCh := make(chan error, 1)
-
-	db.batchMu.Lock()
-	if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
-		// There is no existing batch, or the existing batch is full; start a new one.
-		db.batch = &batch{
-			db: db,
-		}
-		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
-	}
-	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
-	if len(db.batch.calls) >= db.MaxBatchSize {
-		// Wake up the batch; it's ready to run.
-		go db.batch.trigger()
-	}
-	db.batchMu.Unlock()
-
-	err := <-errCh
-	if err == trySolo {
-		err = db.Update(fn)
-	}
-	return err
-}
-
-type call struct {
-	fn  func(*Tx) error
-	err chan<- error
-}
-
-type batch struct {
-	db    *DB
-	timer *time.Timer
-	start sync.Once
-	calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
-	b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
-	b.db.batchMu.Lock()
-	b.timer.Stop()
-	// Make sure no new work is added to this batch, but don't break
-	// other batches.
-	if b.db.batch == b {
-		b.db.batch = nil
-	}
-	b.db.batchMu.Unlock()
-
-retry:
-	for len(b.calls) > 0 {
-		var failIdx = -1
-		err := b.db.Update(func(tx *Tx) error {
-			for i, c := range b.calls {
-				if err := safelyCall(c.fn, tx); err != nil {
-					failIdx = i
-					return err
-				}
-			}
-			return nil
-		})
-
-		if failIdx >= 0 {
-			// Take the failing transaction out of the batch. It's
-			// safe to shorten b.calls here because db.batch no longer
-			// points to us, and we hold the mutex anyway.
-			c := b.calls[failIdx]
-			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
-			// Tell the submitter to re-run it solo; continue with the rest of the batch.
-			c.err <- trySolo
-			continue retry
-		}
-
-		// Pass success, or bolt internal errors, to all callers.
-		for _, c := range b.calls {
-			if c.err != nil {
-				c.err <- err
-			}
-		}
-		break retry
-	}
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
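-// (Batch intercepts trySolo and re-runs the failing function directly
-// via db.Update.)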
-var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go deleted file mode 100644 index b745a37..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "hash/fnv" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func validateBatchBench(b *testing.B, db *TestDB) { - var rollback = errors.New("sentinel error to cause rollback") - validate := func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("bench")) - h := fnv.New32a() - buf := make([]byte, 4) - for id := uint32(0); id < 1000; id++ { - binary.LittleEndian.PutUint32(buf, id) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - v := bucket.Get(k) - if v == nil { - b.Errorf("not found id=%d key=%x", id, k) - continue - } - if g, e := v, []byte("filler"); !bytes.Equal(g, e) { - b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) - } - if err := bucket.Delete(k); err != nil { - return err - } - } - // should be empty now - c := bucket.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - b.Errorf("unexpected key: %x = %q", k, v) - } - return rollback - } - if err := db.Update(validate); err != nil && err != rollback { - b.Error(err) - } -} - -func BenchmarkDBBatchAutomatic(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Batch(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchSingle(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Update(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchManual10x100(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for major := 
0; major < 10; major++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - insert100 := func(tx *bolt.Tx) error { - h := fnv.New32a() - buf := make([]byte, 4) - for minor := uint32(0); minor < 100; minor++ { - binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - b := tx.Bucket([]byte("bench")) - if err := b.Put(k, []byte("filler")); err != nil { - return err - } - } - return nil - } - if err := db.Update(insert100); err != nil { - b.Fatal(err) - } - }(uint32(major)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go deleted file mode 100644 index 74eff8a..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package bolt_test - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "net/http/httptest" - "os" - - "github.com/boltdb/bolt" -) - -// Set this to see how the counts are actually updated. -const verbose = false - -// Counter updates a counter in Bolt for every URL path requested. -type counter struct { - db *bolt.DB -} - -func (c counter) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - // Communicates the new count from a successful database - // transaction. - var result uint64 - - increment := func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("hits")) - if err != nil { - return err - } - key := []byte(req.URL.String()) - // Decode handles key not found for us. - count := decode(b.Get(key)) + 1 - b.Put(key, encode(count)) - // All good, communicate new count. - result = count - return nil - } - if err := c.db.Batch(increment); err != nil { - http.Error(rw, err.Error(), 500) - return - } - - if verbose { - log.Printf("server: %s: %d", req.URL.String(), result) - } - - rw.Header().Set("Content-Type", "application/octet-stream") - fmt.Fprintf(rw, "%d\n", result) -} - -func client(id int, base string, paths []string) error { - // Process paths in random order. - rng := rand.New(rand.NewSource(int64(id))) - permutation := rng.Perm(len(paths)) - - for i := range paths { - path := paths[permutation[i]] - resp, err := http.Get(base + path) - if err != nil { - return err - } - defer resp.Body.Close() - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if verbose { - log.Printf("client: %s: %s", path, buf) - } - } - return nil -} - -func ExampleDB_Batch() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start our web server - count := counter{db} - srv := httptest.NewServer(count) - defer srv.Close() - - // Decrease the batch size to make things more interesting. - db.MaxBatchSize = 3 - - // Get every path multiple times concurrently. - const clients = 10 - paths := []string{ - "/foo", - "/bar", - "/baz", - "/quux", - "/thud", - "/xyzzy", - } - errors := make(chan error, clients) - for i := 0; i < clients; i++ { - go func(id int) { - errors <- client(id, srv.URL, paths) - }(i) - } - // Check all responses to make sure there's no error. 
- for i := 0; i < clients; i++ { - if err := <-errors; err != nil { - fmt.Printf("client error: %v", err) - return - } - } - - // Check the final result - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("hits")) - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("hits to %s: %d\n", k, decode(v)) - } - return nil - }) - - // Output: - // hits to /bar: 10 - // hits to /baz: 10 - // hits to /foo: 10 - // hits to /quux: 10 - // hits to /thud: 10 - // hits to /xyzzy: 10 -} - -// encode marshals a counter. -func encode(n uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, n) - return buf -} - -// decode unmarshals a counter. Nil buffers are decoded as 0. -func decode(buf []byte) uint64 { - if buf == nil { - return 0 - } - return binary.BigEndian.Uint64(buf) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go deleted file mode 100644 index 0b5075f..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package bolt_test - -import ( - "testing" - "time" - - "github.com/boltdb/bolt" -) - -// Ensure two functions can perform updates in a single batch. -func TestDB_Batch(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - // Iterate over multiple updates in separate goroutines. - n := 2 - ch := make(chan error) - for i := 0; i < n; i++ { - go func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - }(i) - } - - // Check all responses to make sure there's no error. - for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_Batch_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var sentinel int - var bork = &sentinel - var problem interface{} - var err error - - // Execute a function inside a batch that panics. - func() { - defer func() { - if p := recover(); p != nil { - problem = p - } - }() - err = db.Batch(func(tx *bolt.Tx) error { - panic(bork) - }) - }() - - // Verify there is no error. - if g, e := err, error(nil); g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } - // Verify the panic was captured. - if g, e := problem, bork; g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } -} - -func TestDB_BatchFull(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 3 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = size - // high enough to never trigger here - db.MaxBatchDelay = 1 * time.Hour - - go put(1) - go put(2) - - // Give the batch a chance to exhibit bugs. - time.Sleep(10 * time.Millisecond) - - // not triggered yet - select { - case <-ch: - t.Fatalf("batch triggered too early") - default: - } - - go put(3) - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. 
- db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_BatchTime(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 1 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = 1000 - db.MaxBatchDelay = 0 - - go put(1) - - // Batch must trigger by time alone. - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go deleted file mode 100644 index e659bfb..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go deleted file mode 100644 index cca6b7e..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go deleted file mode 100644 index e659bfb..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go deleted file mode 100644 index e9d1c90..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -package bolt - -import ( - "syscall" -) - -var odirect = syscall.O_DIRECT - -// fdatasync flushes written data to a file descriptor. 
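-// On Linux this uses fdatasync(2), which, unlike fsync(2), skips flushing
-// file metadata (such as timestamps) that is not required to read the data back.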
-func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go deleted file mode 100644 index 7c1bef1..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go +++ /dev/null @@ -1,29 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -var odirect int - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go deleted file mode 100644 index b7bea1f..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package bolt_test - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "testing" -) - -// assert fails the test if the condition is false. -func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { - if !condition { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) - tb.FailNow() - } -} - -// ok fails the test if an err is not nil. -func ok(tb testing.TB, err error) { - if err != nil { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) - tb.FailNow() - } -} - -// equals fails the test if exp is not equal to act. -func equals(tb testing.TB, exp, act interface{}) { - if !reflect.DeepEqual(exp, act) { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.FailNow() - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go deleted file mode 100644 index 17ca318..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build !windows,!plan9 - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { - return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. 
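-// It grows the file to the mmap size first (unless NoGrowSync or read-only
-// mode is set), maps it read-only, and advises the kernel that access will
-// be random.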
-func mmap(db *DB, sz int) error {
-	// Truncate and fsync to ensure file size metadata is flushed.
-	// https://github.com/boltdb/bolt/issues/284
-	if !db.NoGrowSync && !db.readOnly {
-		if err := db.file.Truncate(int64(sz)); err != nil {
-			return fmt.Errorf("file resize error: %s", err)
-		}
-		if err := db.file.Sync(); err != nil {
-			return fmt.Errorf("file sync error: %s", err)
-		}
-	}
-
-	// Map the data file to memory.
-	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
-	if err != nil {
-		return err
-	}
-
-	// Advise the kernel that the mmap is accessed randomly.
-	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
-		return fmt.Errorf("madvise: %s", err)
-	}
-
-	// Save the original byte slice and convert to a byte array pointer.
-	db.dataref = b
-	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
-	db.datasz = sz
-	return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
-	// Ignore the unmap if we have no mapped data.
-	if db.dataref == nil {
-		return nil
-	}
-
-	// Unmap using the original byte slice.
-	err := syscall.Munmap(db.dataref)
-	db.dataref = nil
-	db.data = nil
-	db.datasz = 0
-	return err
-}
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
-	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go
deleted file mode 100644
index 8b782be..0000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-	"unsafe"
-)
-
-var odirect int
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
-	return db.file.Sync()
-}
-
-// flock acquires an advisory lock on a file descriptor.
-// It is a no-op on Windows.
-func flock(f *os.File, _ bool, _ time.Duration) error {
-	return nil
-}
-
-// funlock releases an advisory lock on a file descriptor.
-// It is a no-op on Windows.
-func funlock(f *os.File) error {
-	return nil
-}
-
-// mmap memory maps a DB's data file.
-// Based on: https://github.com/edsrzf/mmap-go
-func mmap(db *DB, sz int) error {
-	if !db.readOnly {
-		// Truncate the database to the size of the mmap.
-		if err := db.file.Truncate(int64(sz)); err != nil {
-			return fmt.Errorf("truncate: %s", err)
-		}
-	}
-
-	// Open a file mapping handle.
-	// The variables are named for CreateFileMapping's argument order:
-	// sizehi carries the high 32 bits of sz and sizelo the low 32 bits.
-	sizehi := uint32(sz >> 32)
-	sizelo := uint32(sz) & 0xffffffff
-	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
-	if h == 0 {
-		return os.NewSyscallError("CreateFileMapping", errno)
-	}
-
-	// Create the memory map.
-	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
-	if addr == 0 {
-		return os.NewSyscallError("MapViewOfFile", errno)
-	}
-
-	// Close mapping handle.
-	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
-		return os.NewSyscallError("CloseHandle", err)
-	}
-
-	// Convert to a byte array.
-	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
-	db.datasz = sz
-
-	return nil
-}
-
-// munmap unmaps a pointer from a file.
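-// It is a no-op if no view is currently mapped.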
-// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go deleted file mode 100644 index 8db8977..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -var odirect int - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go deleted file mode 100644 index 6766992..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go +++ /dev/null @@ -1,743 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = 4294967295 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. -func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. 
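-//
-// A typical forward iteration looks like this (illustrative sketch, not part
-// of the original doc comment):
-//
-//	c := bucket.Cursor()
-//	for k, v := c.First(); k != nil; k, v = c.Next() {
-//		fmt.Printf("key=%s, value=%s\n", k, v)
-//	}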
-func (b *Bucket) Cursor() *Cursor {
-	// Update transaction statistics.
-	b.tx.stats.CursorCount++
-
-	// Allocate and return a cursor.
-	return &Cursor{
-		bucket: b,
-		stack:  make([]elemRef, 0),
-	}
-}
-
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-func (b *Bucket) Bucket(name []byte) *Bucket {
-	if b.buckets != nil {
-		if child := b.buckets[string(name)]; child != nil {
-			return child
-		}
-	}
-
-	// Move cursor to key.
-	c := b.Cursor()
-	k, v, flags := c.seek(name)
-
-	// Return nil if the key doesn't exist or it is not a bucket.
-	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
-		return nil
-	}
-
-	// Otherwise create a bucket and cache it.
-	var child = b.openBucket(v)
-	if b.buckets != nil {
-		b.buckets[string(name)] = child
-	}
-
-	return child
-}
-
-// openBucket is a helper method that re-interprets a sub-bucket value
-// from a parent into a Bucket.
-func (b *Bucket) openBucket(value []byte) *Bucket {
-	var child = newBucket(b.tx)
-
-	// If this is a writable transaction then we need to copy the bucket entry.
-	// Read-only transactions can point directly at the mmap entry.
-	if b.tx.writable {
-		child.bucket = &bucket{}
-		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
-	} else {
-		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
-	}
-
-	// Save a reference to the inline page if the bucket is inline.
-	if child.root == 0 {
-		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
-	}
-
-	return &child
-}
-
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
-	if b.tx.db == nil {
-		return nil, ErrTxClosed
-	} else if !b.tx.writable {
-		return nil, ErrTxNotWritable
-	} else if len(key) == 0 {
-		return nil, ErrBucketNameRequired
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key.
-	if bytes.Equal(key, k) {
-		if (flags & bucketLeafFlag) != 0 {
-			return nil, ErrBucketExists
-		} else {
-			return nil, ErrIncompatibleValue
-		}
-	}
-
-	// Create empty, inline bucket.
-	var bucket = Bucket{
-		bucket:      &bucket{},
-		rootNode:    &node{isLeaf: true},
-		FillPercent: DefaultFillPercent,
-	}
-	var value = bucket.write()
-
-	// Insert into node.
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, bucketLeafFlag)
-
-	// Since subbuckets are not allowed on inline buckets, we need to
-	// dereference the inline page, if it exists. This will cause the bucket
-	// to be treated as a regular, non-inline bucket for the rest of the tx.
-	b.page = nil
-
-	return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
-	child, err := b.CreateBucket(key)
-	if err == ErrBucketExists {
-		return b.Bucket(key), nil
-	} else if err != nil {
-		return nil, err
-	}
-	return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if the bucket doesn't exist or the key is not a bucket.
-	if !bytes.Equal(key, k) {
-		return ErrBucketNotFound
-	} else if (flags & bucketLeafFlag) == 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Recursively delete all child buckets.
-	child := b.Bucket(key)
-	err := child.ForEach(func(k, v []byte) error {
-		if v == nil {
-			if err := child.DeleteBucket(k); err != nil {
-				return fmt.Errorf("delete bucket: %s", err)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
-	// Remove cached copy.
-	delete(b.buckets, string(key))
-
-	// Release all bucket pages to freelist.
-	child.nodes = nil
-	child.rootNode = nil
-	child.free()
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
-	k, v, flags := b.Cursor().seek(key)
-
-	// Return nil if this is a bucket.
-	if (flags & bucketLeafFlag) != 0 {
-		return nil
-	}
-
-	// If our target node isn't the same key as what's passed in then return nil.
-	if !bytes.Equal(key, k) {
-		return nil
-	}
-	return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exists then its previous value will be overwritten.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	} else if len(key) == 0 {
-		return ErrKeyRequired
-	} else if len(key) > MaxKeySize {
-		return ErrKeyTooLarge
-	} else if int64(len(value)) > MaxValueSize {
-		return ErrValueTooLarge
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key with a bucket value.
-	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Insert into node.
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, 0)
-
-	return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	_, _, flags := c.seek(key)
-
-	// Return an error if there is an existing bucket value.
-	if (flags & bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
-	if b.tx.db == nil {
-		return 0, ErrTxClosed
-	} else if !b.Writable() {
-		return 0, ErrTxNotWritable
-	}
-
-	// Materialize the root node if it hasn't been already so that the
-	// bucket will be saved during commit.
-	if b.rootNode == nil {
-		_ = b.node(b.root, nil)
-	}
-
-	// Increment and return the sequence.
-	b.bucket.sequence++
-	return b.bucket.sequence, nil
-}
-
-// ForEach executes a function for each key/value pair in a bucket.
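-// Key/value pairs are visited in sorted key order.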
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	}
-	c := b.Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		if err := fn(k, v); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Stats returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
-	var s, subStats BucketStats
-	pageSize := b.tx.db.pageSize
-	s.BucketN += 1
-	if b.root == 0 {
-		s.InlineBucketN += 1
-	}
-	b.forEachPage(func(p *page, depth int) {
-		if (p.flags & leafPageFlag) != 0 {
-			s.KeyN += int(p.count)
-
-			// used totals the used bytes for the page
-			used := pageHeaderSize
-
-			if p.count != 0 {
-				// If page has any elements, add all element headers.
-				used += leafPageElementSize * int(p.count-1)
-
-				// Add all element key, value sizes.
-				// The computation takes advantage of the fact that the position
-				// of the last element's key/value equals the total of the sizes
-				// of all previous elements' keys and values.
-				// It also includes the last element's header.
-				lastElement := p.leafPageElement(p.count - 1)
-				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
-			}
-
-			if b.root == 0 {
-				// For inlined bucket just update the inline stats
-				s.InlineBucketInuse += used
-			} else {
-				// For non-inlined bucket update all the leaf stats
-				s.LeafPageN++
-				s.LeafInuse += used
-				s.LeafOverflowN += int(p.overflow)
-
-				// Collect stats from sub-buckets.
-				// Do that by iterating over all element headers
-				// looking for the ones with the bucketLeafFlag.
-				for i := uint16(0); i < p.count; i++ {
-					e := p.leafPageElement(i)
-					if (e.flags & bucketLeafFlag) != 0 {
-						// For any bucket element, open the element value
-						// and recursively call Stats on the contained bucket.
-						subStats.Add(b.openBucket(e.value()).Stats())
-					}
-				}
-			}
-		} else if (p.flags & branchPageFlag) != 0 {
-			s.BranchPageN++
-			lastElement := p.branchPageElement(p.count - 1)
-
-			// used totals the used bytes for the page
-			// Add header and all element headers.
-			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
-
-			// Add size of all keys and values.
-			// Again, use the fact that the last element's position equals the
-			// total of the key, value sizes of all previous elements.
-			used += int(lastElement.pos + lastElement.ksize)
-			s.BranchInuse += used
-			s.BranchOverflowN += int(p.overflow)
-		}
-
-		// Keep track of maximum page depth.
-		if depth+1 > s.Depth {
-			s.Depth = (depth + 1)
-		}
-	})
-
-	// Alloc stats can be computed from page counts and pageSize.
-	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
-	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
-	// Add the max depth of sub-buckets to get total nested depth.
-	s.Depth += subStats.Depth
-	// Add the stats for all sub-buckets
-	s.Add(subStats)
-	return s
-}
-
-// forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
-	// If we have an inline page then just use that.
-	if b.page != nil {
-		fn(b.page, 0)
-		return
-	}
-
-	// Otherwise traverse the page hierarchy.
-	b.tx.forEachPage(b.root, 0, fn)
-}
-
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
-func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
-	// If we have an inline page or root node then just use that.
- if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. 
- var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. 
-	BranchAlloc int // bytes allocated for physical branch pages
-	BranchInuse int // bytes actually used for branch data
-	LeafAlloc   int // bytes allocated for physical leaf pages
-	LeafInuse   int // bytes actually used for leaf data
-
-	// Bucket statistics
-	BucketN           int // total number of buckets including the top bucket
-	InlineBucketN     int // total number of inlined buckets
-	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
-	s.BranchPageN += other.BranchPageN
-	s.BranchOverflowN += other.BranchOverflowN
-	s.LeafPageN += other.LeafPageN
-	s.LeafOverflowN += other.LeafOverflowN
-	s.KeyN += other.KeyN
-	if s.Depth < other.Depth {
-		s.Depth = other.Depth
-	}
-	s.BranchAlloc += other.BranchAlloc
-	s.BranchInuse += other.BranchInuse
-	s.LeafAlloc += other.LeafAlloc
-	s.LeafInuse += other.LeafInuse
-
-	s.BucketN += other.BucketN
-	s.InlineBucketN += other.InlineBucketN
-	s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go
deleted file mode 100644
index 62b8c58..0000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go
+++ /dev/null
@@ -1,1169 +0,0 @@
-package bolt_test
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math/rand"
-	"os"
-	"strconv"
-	"strings"
-	"testing"
-	"testing/quick"
-
-	"github.com/boltdb/bolt"
-)
-
-// Ensure that a bucket that gets a non-existent key returns nil.
-func TestBucket_Get_NonExistent(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		assert(t, value == nil, "")
-		return nil
-	})
-}
-
-// Ensure that a bucket can read a value that is not flushed yet.
-func TestBucket_Get_FromNode(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		b.Put([]byte("foo"), []byte("bar"))
-		value := b.Get([]byte("foo"))
-		equals(t, []byte("bar"), value)
-		return nil
-	})
-}
-
-// Ensure that a bucket retrieved via Get() returns nil.
-func TestBucket_Get_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
-		return nil
-	})
-}
-
-// Ensure that a bucket can write a key/value.
-func TestBucket_Put(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-		ok(t, err)
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		equals(t, value, []byte("bar"))
-		return nil
-	})
-}
-
-// Ensure that a bucket can rewrite a key in the same transaction.
-func TestBucket_Put_Repeat(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		ok(t, b.Put([]byte("foo"), []byte("bar")))
-		ok(t, b.Put([]byte("foo"), []byte("baz")))
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		equals(t, value, []byte("baz"))
-		return nil
-	})
-}
-
-// Ensure that a bucket can write a bunch of large values.
-func TestBucket_Put_Large(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	count, factor := 100, 200
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		for i := 1; i < count; i++ {
-			ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
-		}
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		for i := 1; i < count; i++ {
-			value := b.Get([]byte(strings.Repeat("0", i*factor)))
-			equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
-		}
-		return nil
-	})
-}
-
-// Ensure that a database can perform multiple large appends safely.
-func TestDB_Put_VeryLarge(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	n, batchN := 400000, 200000
-	ksize, vsize := 8, 500
-
-	db := NewTestDB()
-	defer db.Close()
-
-	for i := 0; i < n; i += batchN {
-		err := db.Update(func(tx *bolt.Tx) error {
-			b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
-			for j := 0; j < batchN; j++ {
-				k, v := make([]byte, ksize), make([]byte, vsize)
-				binary.BigEndian.PutUint32(k, uint32(i+j))
-				ok(t, b.Put(k, v))
-			}
-			return nil
-		})
-		ok(t, err)
-	}
-}
-
-// Ensure that setting a value on a key with a bucket value returns an error.
-func TestBucket_Put_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-		return nil
-	})
-}
-
-// Ensure that setting a value while the transaction is closed returns an error.
-func TestBucket_Put_Closed(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, _ := db.Begin(true)
-	tx.CreateBucket([]byte("widgets"))
-	b := tx.Bucket([]byte("widgets"))
-	tx.Rollback()
-	equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
-}
-
-// Ensure that setting a value on a read-only bucket returns an error.
-func TestBucket_Put_ReadOnly(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		ok(t, err)
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		err := b.Put([]byte("foo"), []byte("bar"))
-		equals(t, err, bolt.ErrTxNotWritable)
-		return nil
-	})
-}
-
-// Ensure that a bucket can delete an existing key.
-func TestBucket_Delete(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-		err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
-		ok(t, err)
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		assert(t, value == nil, "")
-		return nil
-	})
-}
-
-// Ensure that deleting a large set of keys will work correctly.
-func TestBucket_Delete_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - var b, _ = tx.CreateBucket([]byte("widgets")) - for i := 0; i < 100; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) - } - return nil - }) - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - ok(t, b.Delete([]byte(strconv.Itoa(i)))) - } - return nil - }) - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") - } - return nil - }) -} - -// Deleting a very large list of keys will cause the freelist to use overflow. -func TestBucket_Delete_FreelistOverflow(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := NewTestDB() - defer db.Close() - k := make([]byte, 16) - for i := uint64(0); i < 10000; i++ { - err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } - - for j := uint64(0); j < 1000; j++ { - binary.BigEndian.PutUint64(k[:8], i) - binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } - } - - return nil - }) - - if err != nil { - t.Fatalf("update error: %s", err) - } - } - - // Delete all of them in one large transaction - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("0")) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - b.Delete(k) - } - return nil - }) - - // Check that a freelist overflow occurred. - ok(t, err) -} - -// Ensure that accessing and updating nested buckets is ok across transactions. -func TestBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - // Create a widgets bucket. - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - - // Create a widgets/foo bucket. - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - - // Create a widgets/bar key. - ok(t, b.Put([]byte("bar"), []byte("0000"))) - - return nil - }) - db.MustCheck() - - // Update widgets/bar. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Put([]byte("bar"), []byte("xxxx"))) - return nil - }) - db.MustCheck() - - // Cause a split. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) - } - return nil - }) - db.MustCheck() - - // Insert into widgets/foo/baz. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) - return nil - }) - db.MustCheck() - - // Verify. - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) - equals(t, []byte("xxxx"), b.Get([]byte("bar"))) - for i := 0; i < 10000; i++ { - equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) - } - return nil - }) -} - -// Ensure that deleting a bucket using Delete() returns an error. 
-func TestBucket_Delete_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - _, err := b.CreateBucket([]byte("foo")) - ok(t, err) - equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) - return nil - }) -} - -// Ensure that deleting a key on a read-only bucket returns an error. -func TestBucket_Delete_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - err := b.Delete([]byte("foo")) - equals(t, err, bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that deleting a value while the transaction is closed returns an error. -func TestBucket_Delete_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) -} - -// Ensure that deleting a bucket causes nested buckets to be deleted. -func TestBucket_DeleteBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) - return nil - }) -} - -// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. -func TestBucket_DeleteBucket_Nested2(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - return nil - }) - db.Update(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "") - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "") - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) - ok(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) == nil, "") - return nil - }) -} - -// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
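-// The child bucket below is filled with 1000 hundred-byte values so that it -// spans multiple pages; if DeleteBucket() leaks any of them, the consistency -// check in TestDB.Close() panics.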
-func TestBucket_DeleteBucket_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) - for i := 0; i < 1000; i++ { - ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) - } - return nil - }) - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) - - // NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly. -} - -// Ensure that retrieving a simple (non-bucket) value via Bucket() returns nil. -func TestBucket_Bucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "") - return nil - }) -} - -// Ensure that creating a bucket on an existing non-bucket key returns an error. -func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - equals(t, bolt.ErrIncompatibleValue, err) - return nil - }) -} - -// Ensure that deleting a bucket on an existing non-bucket key returns an error. -func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) - return nil - }) -} - -// Ensure that a bucket can return an autoincrementing sequence. -func TestBucket_NextSequence(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) - - // Make sure sequence increments. - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - ok(t, err) - equals(t, seq, uint64(1)) - seq, err = tx.Bucket([]byte("widgets")).NextSequence() - ok(t, err) - equals(t, seq, uint64(2)) - - // Buckets should be separate. - seq, err = tx.Bucket([]byte("woojits")).NextSequence() - ok(t, err) - equals(t, seq, uint64(1)) - return nil - }) -} - -// Ensure that a bucket will persist an autoincrementing sequence even if it's -// the only thing updated on the bucket. -// https://github.com/boltdb/bolt/issues/296 -func TestBucket_NextSequence_Persist(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, _ = tx.CreateBucket([]byte("widgets")) - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - _, _ = tx.Bucket([]byte("widgets")).NextSequence() - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } - return nil - }) -} - -// Ensure that retrieving the next sequence on a read-only bucket returns an error.
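-// NextSequence mutates counter state stored in the bucket header, so it -// requires a writable transaction; inside View() it is expected to fail with -// bolt.ErrTxNotWritable and return a zero sequence.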
-func TestBucket_NextSequence_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - i, err := b.NextSequence() - equals(t, i, uint64(0)) - equals(t, err, bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that retrieving the next sequence for a bucket on a closed database returns an error. -func TestBucket_NextSequence_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - _, err := b.NextSequence() - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure a user can loop over all key/value pairs in a bucket. -func TestBucket_ForEach(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002")) - - var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - switch index { - case 0: - equals(t, k, []byte("bar")) - equals(t, v, []byte("0002")) - case 1: - equals(t, k, []byte("baz")) - equals(t, v, []byte("0001")) - case 2: - equals(t, k, []byte("foo")) - equals(t, v, []byte("0000")) - } - index++ - return nil - }) - ok(t, err) - equals(t, index, 3) - return nil - }) -} - -// Ensure a database can stop iteration early. -func TestBucket_ForEach_ShortCircuit(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - - var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - index++ - if bytes.Equal(k, []byte("baz")) { - return errors.New("marker") - } - return nil - }) - equals(t, errors.New("marker"), err) - equals(t, 2, index) - return nil - }) -} - -// Ensure that looping over a bucket on a closed database returns an error. -func TestBucket_ForEach_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - err := b.ForEach(func(k, v []byte) error { return nil }) - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure that an error is returned when inserting with an empty key. -func TestBucket_Put_EmptyKey(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) - equals(t, err, bolt.ErrKeyRequired) - err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) - equals(t, err, bolt.ErrKeyRequired) - return nil - }) -} - -// Ensure that an error is returned when inserting with a key that's too large.
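-// bolt.MaxKeySize is 32768 bytes, so the 32769-byte key below is exactly one -// byte over the limit.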
-func TestBucket_Put_KeyTooLarge(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) - equals(t, err, bolt.ErrKeyTooLarge) - return nil - }) -} - -// Ensure that an error is returned when inserting a value that's too large. -func TestBucket_Put_ValueTooLarge(t *testing.T) { - if os.Getenv("DRONE") == "true" { - t.Skip("not enough RAM for test") - } - - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)) - equals(t, err, bolt.ErrValueTooLarge) - return nil - }) -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Add a bucket with 500 small values and one big value. - big_key := []byte("really-big-value") - for i := 0; i < 500; i++ { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))) - }) - } - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put(big_key, []byte(strings.Repeat("*", 10000))) - }) - - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("woojits")) - stats := b.Stats() - equals(t, 1, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 7, stats.LeafPageN) - equals(t, 2, stats.LeafOverflowN) - equals(t, 501, stats.KeyN) - equals(t, 2, stats.Depth) - - branchInuse := 16 // branch page header - branchInuse += 7 * 16 // branch elements - branchInuse += 7 * 3 // branch keys (7 3-byte keys) - equals(t, branchInuse, stats.BranchInuse) - - leafInuse := 7 * 16 // leaf page headers - leafInuse += 501 * 16 // leaf elements - leafInuse += 500*3 + len(big_key) // leaf keys - leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - equals(t, leafInuse, stats.LeafInuse) - - if os.Getpagesize() == 4096 { - // These allocation stats assume a 4KB page size. - equals(t, 4096, stats.BranchAlloc) - equals(t, 36864, stats.LeafAlloc) - } - - equals(t, 1, stats.BucketN) - equals(t, 0, stats.InlineBucketN) - equals(t, 0, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure a bucket with random insertion utilizes fill percentage correctly. -func TestBucket_Stats_RandomFill(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } else if os.Getpagesize() != 4096 { - t.Skip("invalid page size for test") - } - - db := NewTestDB() - defer db.Close() - - // Add a set of values in random order. It will be the same random - order so we can maintain consistency between test runs.
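-// A fixed seed (42) keeps the permutations, and therefore the page and byte -// counts asserted below, identical on every run.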
- var count int - r := rand.New(rand.NewSource(42)) - for _, i := range r.Perm(1000) { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - b.FillPercent = 0.9 - for _, j := range r.Perm(100) { - index := (j * 10000) + i - b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) - count++ - } - return nil - }) - } - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - s := tx.Bucket([]byte("woojits")).Stats() - equals(t, 100000, s.KeyN) - - equals(t, 98, s.BranchPageN) - equals(t, 0, s.BranchOverflowN) - equals(t, 130984, s.BranchInuse) - equals(t, 401408, s.BranchAlloc) - - equals(t, 3412, s.LeafPageN) - equals(t, 0, s.LeafOverflowN) - equals(t, 4742482, s.LeafInuse) - equals(t, 13975552, s.LeafAlloc) - return nil - }) -} - -// Ensure a bucket that fits on a single root leaf can calculate stats. -func TestBucket_Stats_Small(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - b, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - b.Put([]byte("foo"), []byte("bar")) - - return nil - }) - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 1, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // These allocation stats assume a 4KB page size. - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16+16+6, stats.InlineBucketInuse) - return nil - }) -} - -func TestBucket_Stats_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - _, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - return nil - }) - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 0, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // These allocation stats assume a 4KB page size. - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure stats can be calculated across nested buckets.
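-// The expected LeafInuse is reconstructed page by page below: the foo leaf, -// the bar leaf, and the inline baz bucket, whose bytes are also reported via -// InlineBucketInuse.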
-func TestBucket_Stats_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - ok(t, err) - for i := 0; i < 100; i++ { - b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) - } - bar, err := b.CreateBucket([]byte("bar")) - ok(t, err) - for i := 0; i < 10; i++ { - bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) - } - baz, err := bar.CreateBucket([]byte("baz")) - ok(t, err) - for i := 0; i < 10; i++ { - baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) - } - return nil - }) - - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("foo")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 2, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 122, stats.KeyN) - equals(t, 3, stats.Depth) - equals(t, 0, stats.BranchInuse) - - foo := 16 // foo (pghdr) - foo += 101 * 16 // foo leaf elements - foo += 100*2 + 100*2 // foo leaf key/values - foo += 3 + 16 // foo -> bar key/value - - bar := 16 // bar (pghdr) - bar += 11 * 16 // bar leaf elements - bar += 10 + 10 // bar leaf key/values - bar += 3 + 16 // bar -> baz key/value - - baz := 16 // baz (inline) (pghdr) - baz += 10 * 16 // baz leaf elements - baz += 10 + 10 // baz leaf key/values - - equals(t, foo+bar+baz, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // These allocation stats assume a 4KB page size. - equals(t, 0, stats.BranchAlloc) - equals(t, 8192, stats.LeafAlloc) - } - equals(t, 3, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, baz, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure a large bucket can calculate stats. -func TestBucket_Stats_Large(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := NewTestDB() - defer db.Close() - - var index int - for i := 0; i < 100; i++ { - db.Update(func(tx *bolt.Tx) error { - // Add bucket with lots of keys. - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) - for i := 0; i < 1000; i++ { - b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) - index++ - } - return nil - }) - } - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - stats := b.Stats() - equals(t, 13, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 1196, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 100000, stats.KeyN) - equals(t, 3, stats.Depth) - equals(t, 25257, stats.BranchInuse) - equals(t, 2596916, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // These allocation stats assume a 4KB page size. - equals(t, 53248, stats.BranchAlloc) - equals(t, 4898816, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 0, stats.InlineBucketN) - equals(t, 0, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure that a bucket can write random keys and values across multiple transactions. -func TestBucket_Put_Single(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - index := 0 - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - m := make(map[string][]byte) - - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - for _, item := range items { - db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { - panic("put error: " + err.Error()) - } - m[string(item.Key)] = item.Value - return nil - }) - - // Verify all key/values so far.
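-// Re-reading the whole map after every insert is what makes this quick check -// useful: a failure points at the exact insert that corrupted an earlier key, -// and CopyTempFile() preserves the database for inspection.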
- db.View(func(tx *bolt.Tx) error { - i := 0 - for k, v := range m { - value := tx.Bucket([]byte("widgets")).Get([]byte(k)) - if !bytes.Equal(value, v) { - t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) - db.CopyTempFile() - t.FailNow() - } - i++ - } - return nil - }) - } - - index++ - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can insert multiple key/value pairs at once. -func TestBucket_Put_Multiple(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - // Bulk insert all values. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - return nil - }) - ok(t, err) - - // Verify all items exist. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - value := b.Get(item.Key) - if !bytes.Equal(item.Value, value) { - db.CopyTempFile() - t.Fatalf("exp=%x; got=%x", item.Value, value) - } - } - return nil - }) - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. -func TestBucket_Delete_Quick(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - // Bulk insert all values. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - return nil - }) - ok(t, err) - - // Remove items one at a time and check consistency. - for _, item := range items { - err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete(item.Key) - }) - ok(t, err) - } - - // All items have been deleted, so iteration should find nothing. - db.View(func(tx *bolt.Tx) error { - tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) - return nil - }) - return nil - }) - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -func ExampleBucket_Put() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a bucket. - tx.CreateBucket([]byte("widgets")) - - // Set the value "bar" for the key "foo". - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Read value back in a different read-only transaction. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }) - - // Output: - // The value of 'foo' is: bar -} - -func ExampleBucket_Delete() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a bucket.
- tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - - // Set the value "bar" for the key "foo". - b.Put([]byte("foo"), []byte("bar")) - - // Retrieve the key back from the database and verify it. - value := b.Get([]byte("foo")) - fmt.Printf("The value of 'foo' was: %s\n", value) - return nil - }) - - // Delete the key in a different write transaction. - db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - }) - - // Retrieve the key again. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if value == nil { - fmt.Printf("The value of 'foo' is now: nil\n") - } - return nil - }) - - // Output: - // The value of 'foo' was: bar - // The value of 'foo' is now: nil -} - -func ExampleBucket_ForEach() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Insert data into a bucket. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("animals")) - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Iterate over items in sorted key order. - b.ForEach(func(k, v []byte) error { - fmt.Printf("A %s is %s.\n", k, v) - return nil - }) - return nil - }) - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go deleted file mode 100644 index a1f2ae8..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go +++ /dev/null @@ -1,1529 +0,0 @@ -package main - -import ( - "bytes" - "encoding/binary" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" - "unsafe" - - "github.com/boltdb/bolt" -) - -var ( - // ErrUsage is returned when a usage message was printed and the process - // should simply exit with an error. - ErrUsage = errors.New("usage") - - // ErrUnknownCommand is returned when a CLI command is not recognized. - ErrUnknownCommand = errors.New("unknown command") - - // ErrPathRequired is returned when the path to a Bolt database is not specified. - ErrPathRequired = errors.New("path required") - - // ErrFileNotFound is returned when a Bolt database does not exist. - ErrFileNotFound = errors.New("file not found") - - // ErrInvalidValue is returned when a benchmark reads an unexpected value. - ErrInvalidValue = errors.New("invalid value") - - // ErrCorrupt is returned when checking a data file finds errors. - ErrCorrupt = errors.New("corrupt") - - // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly - // divided by the iteration count. - ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") - - // ErrPageIDRequired is returned when a required page id is not specified. - ErrPageIDRequired = errors.New("page id required") - - // ErrPageNotFound is returned when specifying a page above the high water mark. - ErrPageNotFound = errors.New("page not found") - - // ErrPageFreed is returned when reading a page that has already been freed. - ErrPageFreed = errors.New("page freed") -) - -// PageHeaderSize represents the size of the bolt.page header.
-const PageHeaderSize = 16 - -func main() { - m := NewMain() - if err := m.Run(os.Args[1:]...); err == ErrUsage { - os.Exit(2) - } else if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } -} - -// Main represents the main program execution. -type Main struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewMain returns a new instance of Main connected to standard input/output. -func NewMain() *Main { - return &Main{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (m *Main) Run(args ...string) error { - // Require a command at the beginning. - if len(args) == 0 || strings.HasPrefix(args[0], "-") { - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - } - - // Execute command. - switch args[0] { - case "help": - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - case "bench": - return newBenchCommand(m).Run(args[1:]...) - case "check": - return newCheckCommand(m).Run(args[1:]...) - case "dump": - return newDumpCommand(m).Run(args[1:]...) - case "info": - return newInfoCommand(m).Run(args[1:]...) - case "page": - return newPageCommand(m).Run(args[1:]...) - case "pages": - return newPagesCommand(m).Run(args[1:]...) - case "stats": - return newStatsCommand(m).Run(args[1:]...) - default: - return ErrUnknownCommand - } -} - -// Usage returns the help message. -func (m *Main) Usage() string { - return strings.TrimLeft(` -Bolt is a tool for inspecting bolt databases. - -Usage: - - bolt command [arguments] - -The commands are: - - bench run synthetic benchmark against bolt - check verifies integrity of bolt database - dump print a hexadecimal dump of a single page - info print basic info - help print this screen - page print one or more pages in human readable format - pages print list of pages with their types - stats iterate over all pages and generate usage stats - -Use "bolt [command] -h" for more information about a command. -`, "\n") -} - -// CheckCommand represents the "check" command execution. -type CheckCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewCheckCommand returns a CheckCommand. -func newCheckCommand(m *Main) *CheckCommand { - return &CheckCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *CheckCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Perform consistency check. - return db.View(func(tx *bolt.Tx) error { - var count int - ch := tx.Check() - loop: - for { - select { - case err, ok := <-ch: - if !ok { - break loop - } - fmt.Fprintln(cmd.Stdout, err) - count++ - } - } - - // Print summary of errors. - if count > 0 { - fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) - return ErrCorrupt - } - - // Notify user that database is valid. - fmt.Fprintln(cmd.Stdout, "OK") - return nil - }) -} - -// Usage returns the help message. -func (cmd *CheckCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt check PATH - -Check opens a database at PATH and runs an exhaustive check to verify that -all pages are accessible or are marked as freed.
It also verifies that no -pages are double referenced. - -Verification errors will stream out as they are found and the process will -return after all pages have been checked. -`, "\n") -} - -// InfoCommand represents the "info" command execution. -type InfoCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewInfoCommand returns an InfoCommand. -func newInfoCommand(m *Main) *InfoCommand { - return &InfoCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *InfoCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open the database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Print basic database info. - info := db.Info() - fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) - - return nil -} - -// Usage returns the help message. -func (cmd *InfoCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt info PATH - -Info prints basic information about the Bolt database at PATH. -`, "\n") -} - -// DumpCommand represents the "dump" command execution. -type DumpCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newDumpCommand returns a DumpCommand. -func newDumpCommand(m *Main) *DumpCommand { - return &DumpCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *DumpCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database to retrieve page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return err - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================\n") - } - - // Print page to stdout. - if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { - return err - } - } - - return nil -} - -// PrintPage prints a given page as hexadecimal. -func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines.
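-// Runs of identical lines are collapsed into a single "*" marker, in the -// style of hexdump(1), except that the final line is always printed.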
- var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *DumpCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt dump PATH pageid [pageid...] - -Dump prints a hexadecimal dump of a single page. -`, "\n") -} - -// PageCommand represents the "page" command execution. -type PageCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newPageCommand returns a PageCommand. -func newPageCommand(m *Main) *PageCommand { - return &PageCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================\n") - } - - // Retrieve page info and page size. - p, buf, err := ReadPage(path, pageID) - if err != nil { - return err - } - - // Print basic page info. - fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) - fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) - fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) - - // Print type-specific data. - switch p.Type() { - case "meta": - err = cmd.PrintMeta(cmd.Stdout, buf) - case "leaf": - err = cmd.PrintLeaf(cmd.Stdout, buf) - case "branch": - err = cmd.PrintBranch(cmd.Stdout, buf) - case "freelist": - err = cmd.PrintFreelist(cmd.Stdout, buf) - } - if err != nil { - return err - } - } - - return nil -} - -// PrintMeta prints the data from the meta page.
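-// The raw buffer is reinterpreted at PageHeaderSize as the meta struct copied -// at the bottom of this file; this mirrors bolt's in-memory page layout, so it -// assumes the file was written by a compatible platform.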
-func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error { - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - fmt.Fprintf(w, "Version: %d\n", m.version) - fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize) - fmt.Fprintf(w, "Flags: %08x\n", m.flags) - fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root) - fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist) - fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid) - fmt.Fprintf(w, "Txn ID: %d\n", m.txid) - fmt.Fprintf(w, "Checksum: %016x\n", m.checksum) - fmt.Fprintf(w, "\n") - return nil -} - -// PrintLeaf prints the data for a leaf page. -func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error { - p := (*page)(unsafe.Pointer(&buf[0])) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) - fmt.Fprintf(w, "\n") - - // Print each key/value. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - - // Format key as string. - var k string - if isPrintable(string(e.key())) { - k = fmt.Sprintf("%q", string(e.key())) - } else { - k = fmt.Sprintf("%x", string(e.key())) - } - - // Format value as string. - var v string - if (e.flags & uint32(bucketLeafFlag)) != 0 { - b := (*bucket)(unsafe.Pointer(&e.value()[0])) - v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence) - } else if isPrintable(string(e.value())) { - v = fmt.Sprintf("%q", string(e.value())) - } else { - v = fmt.Sprintf("%x", string(e.value())) - } - - fmt.Fprintf(w, "%s: %s\n", k, v) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintBranch prints the data for a branch page. -func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error { - p := (*page)(unsafe.Pointer(&buf[0])) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) - fmt.Fprintf(w, "\n") - - // Print each key/value. - for i := uint16(0); i < p.count; i++ { - e := p.branchPageElement(i) - - // Format key as string. - var k string - if isPrintable(string(e.key())) { - k = fmt.Sprintf("%q", string(e.key())) - } else { - k = fmt.Sprintf("%x", string(e.key())) - } - - fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintFreelist prints the data for a freelist page. -func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error { - p := (*page)(unsafe.Pointer(&buf[0])) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) - fmt.Fprintf(w, "\n") - - // Print each page in the freelist. - ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)) - for i := uint16(0); i < p.count; i++ { - fmt.Fprintf(w, "%d\n", ids[i]) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintPage prints a given page as hexadecimal. -func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups.
- fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *PageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt page PATH pageid [pageid...] - -Page prints one or more pages in human readable format. -`, "\n") -} - -// PagesCommand represents the "pages" command execution. -type PagesCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewPagesCommand returns a PagesCommand. -func newPagesCommand(m *Main) *PagesCommand { - return &PagesCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PagesCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer func() { _ = db.Close() }() - - // Write header. - fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") - fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") - - return db.Update(func(tx *bolt.Tx) error { - var id int - for { - p, err := tx.Page(id) - if err != nil { - return &PageError{ID: id, Err: err} - } else if p == nil { - break - } - - // Only display count and overflow if this is a non-free page. - var count, overflow string - if p.Type != "free" { - count = strconv.Itoa(p.Count) - if p.OverflowCount > 0 { - overflow = strconv.Itoa(p.OverflowCount) - } - } - - // Print table row. - fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) - - // Move to the next non-overflow page. - id += 1 - if p.Type != "free" { - id += p.OverflowCount - } - } - return nil - }) -} - -// Usage returns the help message. -func (cmd *PagesCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt pages PATH - -Pages prints a table of pages with their type (meta, leaf, branch, freelist). -Leaf and branch pages will show a key count in the "items" column while the -freelist will show the number of free pages in the "items" column. - -The "overflow" column shows the number of blocks that the page spills over -into. Normally there is no overflow but large keys and values can cause -a single page to take up multiple blocks. -`, "\n") -} - -// StatsCommand represents the "stats" command execution. -type StatsCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewStatsCommand returns a StatsCommand. -func newStatsCommand(m *Main) *StatsCommand { - return &StatsCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *StatsCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path.
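-// An optional second argument is treated as a bucket-name prefix; only -// top-level buckets whose names match it are aggregated.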
- path, prefix := fs.Arg(0), fs.Arg(1) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - return db.View(func(tx *bolt.Tx) error { - var s bolt.BucketStats - var count int - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - if bytes.HasPrefix(name, []byte(prefix)) { - s.Add(b.Stats()) - count += 1 - } - return nil - }); err != nil { - return err - } - - fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) - - fmt.Fprintln(cmd.Stdout, "Page count statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) - fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) - - fmt.Fprintln(cmd.Stdout, "Tree statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of key/value pairs: %d\n", s.KeyN) - fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) - - fmt.Fprintln(cmd.Stdout, "Page size utilization") - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) - var percentage int - if s.BranchAlloc != 0 { - percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) - percentage = 0 - if s.LeafAlloc != 0 { - percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) - - fmt.Fprintln(cmd.Stdout, "Bucket statistics") - fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = 0 - if s.BucketN != 0 { - percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) - } - fmt.Fprintf(cmd.Stdout, "\tTotal number of inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) - percentage = 0 - if s.LeafInuse != 0 { - percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) - - return nil - }) -} - -// Usage returns the help message. -func (cmd *StatsCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt stats PATH - -Stats performs an extensive search of the database to track every page -reference. It starts at the current meta page and recursively iterates -through every accessible bucket. - -The following errors can be reported: - - already freed - The page is referenced more than once in the freelist. - - unreachable unfreed - The page is not referenced by a bucket or in the freelist. - - reachable freed - The page is referenced by a bucket but is also in the freelist. - - out of bounds - A page is referenced that is above the high water mark. - - multiple references - A page is referenced by more than one other page. - - invalid type - The page type is not "meta", "leaf", "branch", or "freelist". - -No errors should occur in your database.
However, if for some reason you -experience corruption, please submit a ticket to the Bolt project page: - - https://github.com/boltdb/bolt/issues -`, "\n") -} - -var benchBucketName = []byte("bench") - -// BenchCommand represents the "bench" command execution. -type BenchCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewBenchCommand returns a BenchCommand using the given Main's input/output streams. -func newBenchCommand(m *Main) *BenchCommand { - return &BenchCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the "bench" command. -func (cmd *BenchCommand) Run(args ...string) error { - // Parse CLI arguments. - options, err := cmd.ParseFlags(args) - if err != nil { - return err - } - - // Remove path if "-work" is not set. Otherwise keep path. - if options.Work { - fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) - } else { - defer os.Remove(options.Path) - } - - // Create database. - db, err := bolt.Open(options.Path, 0666, nil) - if err != nil { - return err - } - db.NoSync = options.NoSync - defer db.Close() - - // Write to the database. - var results BenchResults - if err := cmd.runWrites(db, options, &results); err != nil { - return fmt.Errorf("bench: write: %s", err) - } - - // Read from the database. - if err := cmd.runReads(db, options, &results); err != nil { - return fmt.Errorf("bench: read: %s", err) - } - - // Print results. - fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) - fmt.Fprintln(os.Stderr, "") - return nil -} - -// ParseFlags parses the command line flags. -func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { - var options BenchOptions - - // Parse flagset. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") - fs.StringVar(&options.WriteMode, "write-mode", "seq", "") - fs.StringVar(&options.ReadMode, "read-mode", "seq", "") - fs.IntVar(&options.Iterations, "count", 1000, "") - fs.IntVar(&options.BatchSize, "batch-size", 0, "") - fs.IntVar(&options.KeySize, "key-size", 8, "") - fs.IntVar(&options.ValueSize, "value-size", 32, "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.StringVar(&options.BlockProfile, "blockprofile", "", "") - fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") - fs.BoolVar(&options.NoSync, "no-sync", false, "") - fs.BoolVar(&options.Work, "work", false, "") - fs.StringVar(&options.Path, "path", "", "") - fs.SetOutput(cmd.Stderr) - if err := fs.Parse(args); err != nil { - return nil, err - } - - // Set batch size to iteration size if not set. - // Require that batch size can be evenly divided by the iteration count. - if options.BatchSize == 0 { - options.BatchSize = options.Iterations - } else if options.Iterations%options.BatchSize != 0 { - return nil, ErrNonDivisibleBatchSize - } - - // Generate temp path if one is not passed in. - if options.Path == "" { - f, err := ioutil.TempFile("", "bolt-bench-") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - f.Close() - os.Remove(f.Name()) - options.Path = f.Name() - } - - return &options, nil -} - -// Writes to the database.
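-// Writes run in batches of options.BatchSize keys per transaction and are -// timed as a whole; per-operation figures are derived later by BenchResults.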
-func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for writes. - if options.ProfileMode == "rw" || options.ProfileMode == "w" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.WriteMode { - case "seq": - err = cmd.runWritesSequential(db, options, results) - case "rnd": - err = cmd.runWritesRandom(db, options, results) - case "seq-nest": - err = cmd.runWritesSequentialNested(db, options, results) - case "rnd-nest": - err = cmd.runWritesRandomNested(db, options, results) - default: - return fmt.Errorf("invalid write mode: %s", options.WriteMode) - } - - // Save time to write. - results.WriteDuration = time.Since(t) - - // Stop profiling for writes only. - if options.ProfileMode == "w" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists(benchBucketName) - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - key := make([]byte, options.KeySize) - value := make([]byte, options.ValueSize) - - // Write key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert key/value. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - top, err := tx.CreateBucketIfNotExists(benchBucketName) - if err != nil { - return err - } - top.FillPercent = options.FillPercent - - // Create bucket key. - name := make([]byte, options.KeySize) - binary.BigEndian.PutUint32(name, keySource()) - - // Create bucket. - b, err := top.CreateBucketIfNotExists(name) - if err != nil { - return err - } - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - var key = make([]byte, options.KeySize) - var value = make([]byte, options.ValueSize) - - // Generate key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert value into subbucket.
- if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -// Reads from the database. -func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for reads. - if options.ProfileMode == "r" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.ReadMode { - case "seq": - switch options.WriteMode { - case "seq-nest", "rnd-nest": - err = cmd.runReadsSequentialNested(db, options, results) - default: - err = cmd.runReadsSequential(db, options, results) - } - default: - return fmt.Errorf("invalid read mode: %s", options.ReadMode) - } - - // Save read time. - results.ReadDuration = time.Since(t) - - // Stop profiling for reads. - if options.ProfileMode == "rw" || options.ProfileMode == "r" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - - c := tx.Bucket(benchBucketName).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return ErrInvalidValue - } - count++ - } - - if options.WriteMode == "seq" && count != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - var top = tx.Bucket(benchBucketName) - if err := top.ForEach(func(name, _ []byte) error { - c := top.Bucket(name).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return ErrInvalidValue - } - count++ - } - return nil - }); err != nil { - return err - } - - if options.WriteMode == "seq-nest" && count != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -// File handlers for the various profiles. -var cpuprofile, memprofile, blockprofile *os.File - -// Starts all profiles set on the options. -func (cmd *BenchCommand) startProfiling(options *BenchOptions) { - var err error - - // Start CPU profiling. - if options.CPUProfile != "" { - cpuprofile, err = os.Create(options.CPUProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) - os.Exit(1) - } - pprof.StartCPUProfile(cpuprofile) - } - - // Start memory profiling. - if options.MemProfile != "" { - memprofile, err = os.Create(options.MemProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) - os.Exit(1) - } - runtime.MemProfileRate = 4096 - } - - // Start block profiling. - if options.BlockProfile != "" { - blockprofile, err = os.Create(options.BlockProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) - os.Exit(1) - } - runtime.SetBlockProfileRate(1) - } -} - -// Stops all profiles.
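-// Heap data is only written out here, at the end of the run: setting -// runtime.MemProfileRate merely enables sampling, and pprof.Lookup("heap") -// snapshots whatever has accumulated.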
-func (cmd *BenchCommand) stopProfiling() { - if cpuprofile != nil { - pprof.StopCPUProfile() - cpuprofile.Close() - cpuprofile = nil - } - - if memprofile != nil { - pprof.Lookup("heap").WriteTo(memprofile, 0) - memprofile.Close() - memprofile = nil - } - - if blockprofile != nil { - pprof.Lookup("block").WriteTo(blockprofile, 0) - blockprofile.Close() - blockprofile = nil - runtime.SetBlockProfileRate(0) - } -} - -// BenchOptions represents the set of options that can be passed to "bolt bench". -type BenchOptions struct { - ProfileMode string - WriteMode string - ReadMode string - Iterations int - BatchSize int - KeySize int - ValueSize int - CPUProfile string - MemProfile string - BlockProfile string - StatsInterval time.Duration - FillPercent float64 - NoSync bool - Work bool - Path string -} - -// BenchResults represents the performance results of the benchmark. -type BenchResults struct { - WriteOps int - WriteDuration time.Duration - ReadOps int - ReadDuration time.Duration -} - -// Returns the duration for a single write operation. -func (r *BenchResults) WriteOpDuration() time.Duration { - if r.WriteOps == 0 { - return 0 - } - return r.WriteDuration / time.Duration(r.WriteOps) -} - -// Returns average number of write operations that can be performed per second. -func (r *BenchResults) WriteOpsPerSecond() int { - var op = r.WriteOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -// Returns the duration for a single read operation. -func (r *BenchResults) ReadOpDuration() time.Duration { - if r.ReadOps == 0 { - return 0 - } - return r.ReadDuration / time.Duration(r.ReadOps) -} - -// Returns average number of read operations that can be performed per second. -func (r *BenchResults) ReadOpsPerSecond() int { - var op = r.ReadOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -type PageError struct { - ID int - Err error -} - -func (e *PageError) Error() string { - return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) -} - -// isPrintable returns true if the string is valid unicode and contains only printable runes. -func isPrintable(s string) bool { - if !utf8.ValidString(s) { - return false - } - for _, ch := range s { - if !unicode.IsPrint(ch) { - return false - } - } - return true -} - -// ReadPage reads page info & full page data from a path. -// This is not transactionally safe. -func ReadPage(path string, pageID int) (*page, []byte, error) { - // Find page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return nil, nil, fmt.Errorf("read page size: %s", err) - } - - // Open database file. - f, err := os.Open(path) - if err != nil { - return nil, nil, err - } - defer f.Close() - - // Read one block into buffer. - buf := make([]byte, pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - - // Determine total number of blocks. - p := (*page)(unsafe.Pointer(&buf[0])) - overflowN := p.overflow - - // Re-read entire page (with overflow) into buffer. - buf = make([]byte, (int(overflowN)+1)*pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - p = (*page)(unsafe.Pointer(&buf[0])) - - return p, buf, nil -} - -// ReadPageSize reads the page size from a path. -// This is not transactionally safe. -func ReadPageSize(path string) (int, error) { - // Open database file.
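-// Reading a fixed 4KB is enough: meta page 0 sits at offset zero and its -// header, including the recorded page size, fits within the first 4KB of -// the file.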
- f, err := os.Open(path) - if err != nil { - return 0, err - } - defer f.Close() - - // Read 4KB chunk. - buf := make([]byte, 4096) - if _, err := io.ReadFull(f, buf); err != nil { - return 0, err - } - - // Read page size from metadata. - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - return int(m.pageSize), nil -} - -// atois parses a slice of strings into integers. -func atois(strs []string) ([]int, error) { - var a []int - for _, str := range strs { - i, err := strconv.Atoi(str) - if err != nil { - return nil, err - } - a = append(a, i) - } - return a, nil -} - -// DO NOT EDIT. Copied from the "bolt" package. -const maxAllocSize = 0xFFFFFFF - -// DO NOT EDIT. Copied from the "bolt" package. -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -// DO NOT EDIT. Copied from the "bolt" package. -const bucketLeafFlag = 0x01 - -// DO NOT EDIT. Copied from the "bolt" package. -type pgid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type txid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type bucket struct { - root pgid - sequence uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) Type() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. 
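The element accessors above work by casting a raw byte buffer to the page header layout, the same trick ReadPage uses on bytes read from disk. A self-contained sketch of that cast; the struct definitions are local copies for the example and it assumes the buffer is suitably aligned, as mmap'd pages are:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Local copies of the bolt page header layout shown above.
type pgid uint64

type page struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

const leafPageFlag = 0x02

func main() {
	// Fake a 4KB page in memory and stamp a header on it, the same
	// pointer-cast pattern ReadPage applies to bytes read from the file.
	buf := make([]byte, 4096)
	p := (*page)(unsafe.Pointer(&buf[0]))
	p.id = 3
	p.flags = leafPageFlag
	p.count = 7

	// Reading the header back through a fresh cast sees the same fields.
	q := (*page)(unsafe.Pointer(&buf[0]))
	fmt.Printf("page %d: flags=%#04x count=%d\n", q.id, q.flags, q.count)
}
```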
-func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go deleted file mode 100644 index b9e8c67..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package main_test - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/boltdb/bolt" - "github.com/boltdb/bolt/cmd/bolt" -) - -// Ensure the "info" command can print information about a database. -func TestInfoCommand_Run(t *testing.T) { - db := MustOpen(0666, nil) - db.DB.Close() - defer db.Close() - - // Run the info command. - m := NewMain() - if err := m.Run("info", db.Path); err != nil { - t.Fatal(err) - } -} - -// Ensure the "stats" command can execute correctly. -func TestStatsCommand_Run(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create "foo" bucket. - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - return err - } - for i := 0; i < 10; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "bar" bucket. - b, err = tx.CreateBucket([]byte("bar")) - if err != nil { - return err - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "baz" bucket. - b, err = tx.CreateBucket([]byte("baz")) - if err != nil { - return err - } - if err := b.Put([]byte("key"), []byte("value")); err != nil { - return err - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.DB.Close() - - // Generate expected result. - exp := "Aggregate statistics for 3 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 1\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 111\n" + - "\tNumber of levels in B+tree: 1\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 4096\n" + - "\tBytes actually used for leaf data: 1996 (48%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 3\n" + - "\tTotal number on inlined buckets: 2 (66%)\n" + - "\tBytes used for inlined buckets: 236 (11%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Main represents a test wrapper for main.Main that records output. -type Main struct { - *main.Main - Stdin bytes.Buffer - Stdout bytes.Buffer - Stderr bytes.Buffer -} - -// NewMain returns a new instance of Main. -func NewMain() *Main { - m := &Main{Main: main.NewMain()} - m.Main.Stdin = &m.Stdin - m.Main.Stdout = &m.Stdout - m.Main.Stderr = &m.Stderr - return m -} - -// MustOpen creates a Bolt database in a temporary location. -func MustOpen(mode os.FileMode, options *bolt.Options) *DB { - // Create temporary path. 
- f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - - db, err := bolt.Open(f.Name(), mode, options) - if err != nil { - panic(err.Error()) - } - return &DB{DB: db, Path: f.Name()} -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB - Path string -} - -// Close closes and removes the database. -func (db *DB) Close() error { - defer os.Remove(db.Path) - return db.DB.Close() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go deleted file mode 100644 index 006c548..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go +++ /dev/null @@ -1,384 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. 
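As the Cursor doc comment above notes, nested buckets surface with a nil value. A minimal sketch of forward iteration that uses that nil value to tell sub-buckets apart; the database path and bucket name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0666, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets")) // illustrative bucket name
		if b == nil {
			return nil // nothing to iterate
		}
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				// Per the doc comment above, nil marks a nested bucket.
				fmt.Printf("sub-bucket: %s\n", k)
				continue
			}
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```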
-func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. 
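Seek's "the key itself or the next key after it" behavior, documented above, is what makes prefix scans cheap. A sketch under the assumption of an already-open *bolt.DB; the bucket name and prefix are illustrative:

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/boltdb/bolt"
)

// scanPrefix prints every key under prefix, relying on Seek landing on
// the first key >= prefix and Next walking forward in sorted order.
func scanPrefix(db *bolt.DB, prefix []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets")) // illustrative bucket name
		if b == nil {
			return nil
		}
		c := b.Cursor()
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
}
```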
- var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - return c.keyValue() -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. 
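searchNode and searchPage both invert sort.Search's lowest-index contract: sort.Search returns the first key >= target, but a branch-page descent needs the last key <= target, hence the exact-match flag and the index decrement. The trick in isolation:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	keys := [][]byte{[]byte("bar"), []byte("baz"), []byte("foo")}
	target := []byte("cat")

	// sort.Search returns the lowest index whose key is >= target...
	var exact bool
	index := sort.Search(len(keys), func(i int) bool {
		ret := bytes.Compare(keys[i], target)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	// ...so on an inexact match, step back one slot to get the last key
	// <= target, exactly as searchNode/searchPage do above.
	if !exact && index > 0 {
		index--
	}
	fmt.Printf("descend into child %d (%s)\n", index, keys[index]) // child 1 (baz)
}
```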
- inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go deleted file mode 100644 index b12e1f9..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "fmt" - "os" - "sort" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a cursor can return a reference to the bucket that created it. -func TestCursor_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - c := b.Cursor() - equals(t, b, c.Bucket()) - return nil - }) -} - -// Ensure that a Tx cursor can seek to the appropriate keys. -func TestCursor_Seek(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, b.Put([]byte("foo"), []byte("0001"))) - ok(t, b.Put([]byte("bar"), []byte("0002"))) - ok(t, b.Put([]byte("baz"), []byte("0003"))) - _, err = b.CreateBucket([]byte("bkt")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - - // Exact match should go to the key. - k, v := c.Seek([]byte("bar")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) - - // Inexact match should go to the next key. - k, v = c.Seek([]byte("bas")) - equals(t, []byte("baz"), k) - equals(t, []byte("0003"), v) - - // Low key should go to the first key. 
- k, v = c.Seek([]byte("")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) - - // High key should return no key. - k, v = c.Seek([]byte("zzz")) - assert(t, k == nil, "") - assert(t, v == nil, "") - - // Buckets should return their key but no value. - k, v = c.Seek([]byte("bkt")) - equals(t, []byte("bkt"), k) - assert(t, v == nil, "") - - return nil - }) -} - -func TestCursor_Delete(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var count = 1000 - - // Insert every other key between 0 and $count. - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - for i := 0; i < count; i += 1 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(i)) - b.Put(k, make([]byte, 100)) - } - b.CreateBucket([]byte("sub")) - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - bound := make([]byte, 8) - binary.BigEndian.PutUint64(bound, uint64(count/2)) - for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { - if err := c.Delete(); err != nil { - return err - } - } - c.Seek([]byte("sub")) - err := c.Delete() - equals(t, err, bolt.ErrIncompatibleValue) - return nil - }) - - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - equals(t, b.Stats().KeyN, count/2+1) - return nil - }) -} - -// Ensure that a Tx cursor can seek to the appropriate keys when there are a -// large number of keys. This test also checks that seek will always move -// forward to the next key. -// -// Related: https://github.com/boltdb/bolt/pull/187 -func TestCursor_Seek_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var count = 10000 - - // Insert every other key between 0 and $count. - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - for i := 0; i < count; i += 100 { - for j := i; j < i+100; j += 2 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(j)) - b.Put(k, make([]byte, 100)) - } - } - return nil - }) - - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - for i := 0; i < count; i++ { - seek := make([]byte, 8) - binary.BigEndian.PutUint64(seek, uint64(i)) - - k, _ := c.Seek(seek) - - // The last seek is beyond the end of the the range so - // it should return nil. - if i == count-1 { - assert(t, k == nil, "") - continue - } - - // Otherwise we should seek to the exact key or the next key. - num := binary.BigEndian.Uint64(k) - if i%2 == 0 { - equals(t, uint64(i), num) - } else { - equals(t, uint64(i+1), num) - } - } - - return nil - }) -} - -// Ensure that a cursor can iterate over an empty bucket without error. -func TestCursor_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.First() - assert(t, k == nil, "") - assert(t, v == nil, "") - return nil - }) -} - -// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. 
-func TestCursor_EmptyBucketReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - assert(t, k == nil, "") - assert(t, v == nil, "") - return nil - }) -} - -// Ensure that a Tx cursor can iterate over a single root with a couple elements. -func TestCursor_Iterate_Leaf(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.First() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) - - k, v = c.Next() - equals(t, string(k), "baz") - equals(t, v, []byte{}) - - k, v = c.Next() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - tx.Rollback() -} - -// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. -func TestCursor_LeafRootReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.Last() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) - - k, v = c.Prev() - equals(t, string(k), "baz") - equals(t, v, []byte{}) - - k, v = c.Prev() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) - - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") - - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") - - tx.Rollback() -} - -// Ensure that a Tx cursor can restart from the beginning. -func TestCursor_Restart(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) - return nil - }) - - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, _ := c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - k, _ = c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - tx.Rollback() -} - -// Ensure that a Tx can iterate over all elements in a bucket. -func TestCursor_QuickCheck(t *testing.T) { - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - ok(t, tx.Commit()) - - // Sort test data. - sort.Sort(items) - - // Iterate over all items and check consistency. 
- var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) - index++ - } - equals(t, len(items), index) - tx.Rollback() - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can iterate over all elements in a bucket in reverse. -func TestCursor_QuickCheck_Reverse(t *testing.T) { - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - ok(t, tx.Commit()) - - // Sort test data. - sort.Sort(revtestdata(items)) - - // Iterate over all items and check consistency. - var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) - index++ - } - equals(t, len(items), index) - tx.Rollback() - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a Tx cursor can iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - names = append(names, string(k)) - assert(t, v == nil, "") - } - equals(t, names, []string{"bar", "baz", "foo"}) - return nil - }) -} - -// Ensure that a Tx cursor can reverse iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil; k, v = c.Prev() { - names = append(names, string(k)) - assert(t, v == nil, "") - } - equals(t, names, []string{"foo", "baz", "bar"}) - return nil - }) -} - -func ExampleCursor() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - tx.CreateBucket([]byte("animals")) - - // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in sorted key order. This starts from the - // first key/value pair and updates the k/v variables to the - // next key/value on each iteration. 
- // - // The loop finishes at the end of the cursor when a nil key is returned. - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }) - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} - -func ExampleCursor_reverse() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - tx.CreateBucket([]byte("animals")) - - // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in reverse sorted key order. This starts - // from the last key/value pair and updates the k/v variables to - // the previous key/value on each iteration. - // - // The loop finishes at the beginning of the cursor when a nil key - // is returned. - for k, v := c.Last(); k != nil; k, v = c.Prev() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }) - - // Output: - // A liger is awesome. - // A dog is fun. - // A cat is lame. -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go deleted file mode 100644 index d39c4aa..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go +++ /dev/null @@ -1,792 +0,0 @@ -package bolt - -import ( - "fmt" - "hash/fnv" - "os" - "runtime" - "runtime/debug" - "strings" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronzied using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond -) - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips the truncate call when growing the database. 
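The NoSync doc comment above describes its intended use: restartable bulk loads. A hedged sketch of that workflow; loadFn stands in for the real import, and Sync is the explicit flush defined later in this same file:

```go
package example

import "github.com/boltdb/bolt"

// bulkLoad disables the per-commit fsync for a restartable import, then
// re-enables it and forces one final sync so the loaded data is durable.
func bulkLoad(db *bolt.DB, loadFn func(*bolt.DB) error) error {
	db.NoSync = true
	defer func() { db.NoSync = false }()

	if err := loadFn(db); err != nil {
		return err
	}
	// One explicit fdatasync at the end, per the Sync doc comment.
	return db.Sync()
}
```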
- // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - path string - file *os.File - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes. - db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. 
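Per the locking comment above, read-only opens take a shared flock, so several read-only processes can coexist while a writer is excluded. A sketch; the path is a placeholder:

```go
package example

import (
	"time"

	"github.com/boltdb/bolt"
)

// openShared opens the file read-only with a shared lock. A second
// read-only open succeeds concurrently; a read-write open blocks until
// the lock frees or the timeout fires.
func openShared(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0666, &bolt.Options{
		ReadOnly: true,
		Timeout:  time.Second, // fail instead of waiting forever for the lock
	})
}
```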
-	if info, err := db.file.Stat(); err != nil {
-		return nil, fmt.Errorf("stat error: %s", err)
-	} else if info.Size() == 0 {
-		// Initialize new files with meta pages.
-		if err := db.init(); err != nil {
-			return nil, err
-		}
-	} else {
-		// Read the first meta page to determine the page size.
-		var buf [0x1000]byte
-		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
-			m := db.pageInBuffer(buf[:], 0).meta()
-			if err := m.validate(); err != nil {
-				return nil, fmt.Errorf("meta0 error: %s", err)
-			}
-			db.pageSize = int(m.pageSize)
-		}
-	}
-
-	// Memory map the data file.
-	if err := db.mmap(0); err != nil {
-		_ = db.close()
-		return nil, err
-	}
-
-	// Read in the freelist.
-	db.freelist = newFreelist()
-	db.freelist.read(db.page(db.meta().freelist))
-
-	// Mark the database as opened and return.
-	return db, nil
-}
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
-	db.mmaplock.Lock()
-	defer db.mmaplock.Unlock()
-
-	info, err := db.file.Stat()
-	if err != nil {
-		return fmt.Errorf("mmap stat error: %s", err)
-	} else if int(info.Size()) < db.pageSize*2 {
-		return fmt.Errorf("file size too small")
-	}
-
-	// Ensure the size is at least the minimum size.
-	var size = int(info.Size())
-	if size < minsz {
-		size = minsz
-	}
-	size, err = db.mmapSize(size)
-	if err != nil {
-		return err
-	}
-
-	// Dereference all mmap references before unmapping.
-	if db.rwtx != nil {
-		db.rwtx.root.dereference()
-	}
-
-	// Unmap existing data before continuing.
-	if err := db.munmap(); err != nil {
-		return err
-	}
-
-	// Memory-map the data file as a byte slice.
-	if err := mmap(db, size); err != nil {
-		return err
-	}
-
-	// Save references to the meta pages.
-	db.meta0 = db.page(0).meta()
-	db.meta1 = db.page(1).meta()
-
-	// Validate the meta pages.
-	if err := db.meta0.validate(); err != nil {
-		return fmt.Errorf("meta0 error: %s", err)
-	}
-	if err := db.meta1.validate(); err != nil {
-		return fmt.Errorf("meta1 error: %s", err)
-	}
-
-	return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
-	if err := munmap(db); err != nil {
-		return fmt.Errorf("unmap error: " + err.Error())
-	}
-	return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
-	// Double the size from 32KB until 1GB.
-	for i := uint(15); i <= 30; i++ {
-		if size <= 1<<i {
-			return 1 << i, nil
-		}
-	}
-
-	// Verify the requested size is not above the maximum allowed.
-	if size > maxMapSize {
-		return 0, fmt.Errorf("mmap too large")
-	}
-
-	// If larger than 1GB then grow by 1GB at a time.
-	sz := int64(size)
-	if remainder := sz % int64(maxMmapStep); remainder > 0 {
-		sz += int64(maxMmapStep) - remainder
-	}
-
-	// Ensure that the mmap size is a multiple of the page size.
-	// This should always be true since we're incrementing in MBs.
-	pageSize := int64(db.pageSize)
-	if (sz % pageSize) != 0 {
-		sz = ((sz / pageSize) + 1) * pageSize
-	}
-
-	// If we've exceeded the max size then only grow up to the max size.
-	if sz > maxMapSize {
-		sz = maxMapSize
-	}
-
-	return int(sz), nil
-}
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
-	// Set the page size to the OS page size.
-	db.pageSize = os.Getpagesize()
-
-	// Create two meta pages on a buffer.
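The growth policy in mmapSize is easy to check in isolation: power-of-two doubling up to 1GB, then 1GB steps, capped at the platform maximum. A stripped-down replica; the constants are stand-ins for bolt's per-platform values (amd64 assumed) and the page-size rounding is omitted for brevity:

```go
package main

import "fmt"

// Stand-ins for bolt's platform constants (amd64 values assumed).
const (
	maxMapSize  = 0xFFFFFFFFFFFF // 256TB
	maxMmapStep = 1 << 30        // 1GB
)

// grow mirrors mmapSize above: double in power-of-two steps up to 1GB,
// then round up to the next 1GB boundary, capped at maxMapSize.
func grow(size int) (int, error) {
	for i := uint(15); i <= 30; i++ {
		if size <= 1<<i {
			return 1 << i, nil
		}
	}
	if size > maxMapSize {
		return 0, fmt.Errorf("mmap too large")
	}
	sz := int64(size)
	if r := sz % maxMmapStep; r > 0 {
		sz += maxMmapStep - r
	}
	if sz > maxMapSize {
		sz = maxMapSize
	}
	return int(sz), nil
}

func main() {
	for _, s := range []int{1000, 40 << 10, 3 << 30} {
		n, _ := grow(s)
		fmt.Printf("%d -> %d\n", s, n) // 32KB, 64KB, 3GB respectively
	}
}
```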
- buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - - return nil -} - -// Close releases all database resources. -// All transactions must be closed before closing the database. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.RLock() - defer db.mmaplock.RUnlock() - - return db.close() -} - -func (db *DB) close() error { - db.opened = false - - db.freelist = nil - db.path = "" - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - _ = funlock(db.file) - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be depedent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. 
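The Begin doc comment above warns that holding a read transaction open while writing from the same goroutine can deadlock the remap. A safe ordering, sketched with an illustrative bucket name; it also respects the "values are only valid for the life of the transaction" rule by copying before the read transaction closes:

```go
package example

import "github.com/boltdb/bolt"

// readThenWrite finishes the read transaction before starting the write,
// so the database is free to remap between the two.
func readThenWrite(db *bolt.DB, key, val []byte) error {
	var prev []byte
	if err := db.View(func(tx *bolt.Tx) error {
		if b := tx.Bucket([]byte("widgets")); b != nil {
			prev = append([]byte(nil), b.Get(key)...) // copy: value dies with the tx
		}
		return nil
	}); err != nil {
		return err
	}
	_ = prev // the read tx is closed before the write begins

	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put(key, val)
	})
}
```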
- db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - if err := t.Rollback(); err != nil { - return err - } - - return nil -} - -// Sync executes fdatasync() against the database file handle. 
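Update's contract above is all-or-nothing: any error returned from the closure rolls back every change made inside it. A small sketch; the bucket and keys are illustrative:

```go
package example

import (
	"errors"

	"github.com/boltdb/bolt"
)

// atomicPair writes two keys in one transaction. If ok is false, the
// returned error rolls back the first Put as well; neither key survives.
func atomicPair(db *bolt.DB, ok bool) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("pairs"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("a"), []byte("1")); err != nil {
			return err
		}
		if !ok {
			return errors.New("abort: rolls back the Put above")
		}
		return b.Put([]byte("b"), []byte("2"))
	})
}
```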
-// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - if db.meta0.txid > db.meta1.txid { - return db.meta0 - } - return db.meta1 -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { - // Allocate a temporary buffer for the page. - buf := make([]byte, count*db.pageSize) - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. 
-// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } else if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go deleted file mode 100644 index dddf22b..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go +++ /dev/null @@ -1,903 +0,0 @@ -package bolt_test - -import ( - "encoding/binary" - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "runtime" - "sort" - "strings" - "testing" - "time" - - "github.com/boltdb/bolt" -) - -var statsFlag = flag.Bool("stats", false, "show performance stats") - -// Ensure that opening a database with a bad path returns an error. -func TestOpen_BadPath(t *testing.T) { - db, err := bolt.Open("", 0666, nil) - assert(t, err != nil, "err: %s", err) - assert(t, db == nil, "") -} - -// Ensure that a database can be opened without error. 
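Stats.Sub above supports interval monitoring: snapshot, sleep, snapshot, diff. A sketch; note that in this vendored copy diff.TxN is computed as other.TxN minus s.TxN, so its sign runs opposite to the TxStats deltas and is negated here:

```go
package example

import (
	"fmt"
	"time"

	"github.com/boltdb/bolt"
)

// watchStats samples DB.Stats on an interval and diffs consecutive
// snapshots with Stats.Sub, as the doc comment above suggests.
func watchStats(db *bolt.DB, interval time.Duration, rounds int) {
	prev := db.Stats()
	for i := 0; i < rounds; i++ {
		time.Sleep(interval)
		cur := db.Stats()
		diff := cur.Sub(&prev)
		// With the Sub above, diff.TxN is prev minus cur, so negate it
		// to report transactions started during the interval.
		fmt.Printf("read txs started: %d, open now: %d\n", -diff.TxN, cur.OpenTxN)
		prev = cur
	}
}
```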
-func TestOpen(t *testing.T) { - path := tempfile() - defer os.Remove(path) - db, err := bolt.Open(path, 0666, nil) - assert(t, db != nil, "") - ok(t, err) - equals(t, db.Path(), path) - ok(t, db.Close()) -} - -// Ensure that opening an already open database file will timeout. -func TestOpen_Timeout(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("timeout not supported on windows") - } - - path := tempfile() - defer os.Remove(path) - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - assert(t, db0 != nil, "") - ok(t, err) - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) - assert(t, db1 == nil, "") - equals(t, bolt.ErrTimeout, err) - assert(t, time.Since(start) > 100*time.Millisecond, "") - - db0.Close() -} - -// Ensure that opening an already open database file will wait until its closed. -func TestOpen_Wait(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("timeout not supported on windows") - } - - path := tempfile() - defer os.Remove(path) - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - assert(t, db0 != nil, "") - ok(t, err) - - // Close it in just a bit. - time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) - assert(t, db1 != nil, "") - ok(t, err) - assert(t, time.Since(start) > 100*time.Millisecond, "") -} - -// Ensure that opening a database does not increase its size. -// https://github.com/boltdb/bolt/issues/291 -func TestOpen_Size(t *testing.T) { - // Open a data file. - db := NewTestDB() - path := db.Path() - defer db.Close() - - // Insert until we get above the minimum 4MB size. - ok(t, db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000))) - } - return nil - })) - - // Close database and grab the size. - db.DB.Close() - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) - ok(t, db0.Close()) - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. - if sz != newSz { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that opening a database beyond the max step size does not increase its size. -// https://github.com/boltdb/bolt/issues/303 -func TestOpen_Size_Large(t *testing.T) { - if testing.Short() { - t.Skip("short mode") - } - - // Open a data file. - db := NewTestDB() - path := db.Path() - defer db.Close() - - // Insert until we get above the minimum 4MB size. - var index uint64 - for i := 0; i < 10000; i++ { - ok(t, db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for j := 0; j < 1000; j++ { - ok(t, b.Put(u64tob(index), make([]byte, 50))) - index++ - } - return nil - })) - } - - // Close database and grab the size. 
- db.DB.Close() - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } else if sz < (1 << 30) { - t.Fatalf("expected larger initial size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) - ok(t, db0.Close()) - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. - if sz != newSz { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that a re-opened database is consistent. -func TestOpen_Check(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - db, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) - db.Close() - - db, err = bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) - db.Close() -} - -// Ensure that the database returns an error if the file handle cannot be open. -func TestDB_Open_FileError(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil) - assert(t, err.(*os.PathError) != nil, "") - equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path) - equals(t, "open", err.(*os.PathError).Op) -} - -// Ensure that write errors to the meta file handler during initialization are returned. -func TestDB_Open_MetaInitWriteError(t *testing.T) { - t.Skip("pending") -} - -// Ensure that a database that is too small returns an error. -func TestDB_Open_FileTooSmall(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - db, err := bolt.Open(path, 0666, nil) - ok(t, err) - db.Close() - - // corrupt the database - ok(t, os.Truncate(path, int64(os.Getpagesize()))) - - db, err = bolt.Open(path, 0666, nil) - equals(t, errors.New("file size too small"), err) -} - -// Ensure that a database can be opened in read-only mode by multiple processes -// and that a database can not be opened in read-write mode and in read-only -// mode at the same time. -func TestOpen_ReadOnly(t *testing.T) { - bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`) - - path := tempfile() - defer os.Remove(path) - - // Open in read-write mode. - db, err := bolt.Open(path, 0666, nil) - ok(t, db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(bucket) - if err != nil { - return err - } - return b.Put(key, value) - })) - assert(t, db != nil, "") - assert(t, !db.IsReadOnly(), "") - ok(t, err) - ok(t, db.Close()) - - // Open in read-only mode. - db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - ok(t, err) - defer db0.Close() - - // Opening in read-write mode should return an error. - _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}) - assert(t, err != nil, "") - - // And again (in read-only mode). - db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - ok(t, err) - defer db1.Close() - - // Verify both read-only databases are accessible. - for _, db := range []*bolt.DB{db0, db1} { - // Verify is is in read only mode indeed. - assert(t, db.IsReadOnly(), "") - - // Read-only databases should not allow updates. - assert(t, - bolt.ErrDatabaseReadOnly == db.Update(func(*bolt.Tx) error { - panic(`should never get here`) - }), - "") - - // Read-only databases should not allow beginning writable txns. 
- _, err = db.Begin(true) - assert(t, bolt.ErrDatabaseReadOnly == err, "") - - // Verify the data. - ok(t, db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(bucket) - if b == nil { - return fmt.Errorf("expected bucket `%s`", string(bucket)) - } - - got := string(b.Get(key)) - expected := string(value) - if got != expected { - return fmt.Errorf("expected `%s`, got `%s`", expected, got) - } - return nil - })) - } -} - -// TODO(benbjohnson): Test corruption at every byte of the first two pages. - -// Ensure that a database cannot open a transaction when it's not open. -func TestDB_Begin_DatabaseNotOpen(t *testing.T) { - var db bolt.DB - tx, err := db.Begin(false) - assert(t, tx == nil, "") - equals(t, err, bolt.ErrDatabaseNotOpen) -} - -// Ensure that a read-write transaction can be retrieved. -func TestDB_BeginRW(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, err := db.Begin(true) - assert(t, tx != nil, "") - ok(t, err) - assert(t, tx.DB() == db.DB, "") - equals(t, tx.Writable(), true) - ok(t, tx.Commit()) -} - -// Ensure that opening a transaction while the DB is closed returns an error. -func TestDB_BeginRW_Closed(t *testing.T) { - var db bolt.DB - tx, err := db.Begin(true) - equals(t, err, bolt.ErrDatabaseNotOpen) - assert(t, tx == nil, "") -} - -func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } -func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } - -// Ensure that a database cannot close while transactions are open. -func testDB_Close_PendingTx(t *testing.T, writable bool) { - db := NewTestDB() - defer db.Close() - - // Start transaction. - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - // Open update in separate goroutine. - done := make(chan struct{}) - go func() { - db.Close() - close(done) - }() - - // Ensure database hasn't closed. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - t.Fatal("database closed too early") - default: - } - - // Commit transaction. - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Ensure database closed now. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - default: - t.Fatal("database did not close") - } -} - -// Ensure a database can provide a transactional block. -func TestDB_Update(t *testing.T) { - db := NewTestDB() - defer db.Close() - err := db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - b.Put([]byte("baz"), []byte("bat")) - b.Delete([]byte("foo")) - return nil - }) - ok(t, err) - err = db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) - ok(t, err) -} - -// Ensure a closed database returns an error while running a transaction block -func TestDB_Update_Closed(t *testing.T) { - var db bolt.DB - err := db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - equals(t, err, bolt.ErrDatabaseNotOpen) -} - -// Ensure a panic occurs while trying to commit a managed transaction. 
-func TestDB_Update_ManualCommit(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Commit() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_Update_ManualRollback(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Rollback() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_View_ManualCommit(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Commit() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_View_ManualRollback(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Rollback() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a write transaction that panics does not hold open locks. -func TestDB_Update_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: update", r) - } - }() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - panic("omg") - }) - }() - - // Verify we can update again. - err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - ok(t, err) - - // Verify that our change persisted. - err = db.Update(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - return nil - }) -} - -// Ensure a database can return an error through a read-only transactional block. -func TestDB_View_Error(t *testing.T) { - db := NewTestDB() - defer db.Close() - err := db.View(func(tx *bolt.Tx) error { - return errors.New("xxx") - }) - equals(t, errors.New("xxx"), err) -} - -// Ensure a read transaction that panics does not hold open locks. -func TestDB_View_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: view", r) - } - }() - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - panic("omg") - }) - }() - - // Verify that we can still use read transactions. - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - return nil - }) -} - -// Ensure that an error is returned when a database write fails. -func TestDB_Commit_WriteFail(t *testing.T) { - t.Skip("pending") // TODO(benbjohnson) -} - -// Ensure that DB stats can be returned. 
-func TestDB_Stats(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - stats := db.Stats() - equals(t, 2, stats.TxStats.PageCount) - equals(t, 0, stats.FreePageN) - equals(t, 2, stats.PendingPageN) -} - -// Ensure that database pages are in expected order and type. -func TestDB_Consistency(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - for i := 0; i < 10; i++ { - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - return nil - }) - } - db.Update(func(tx *bolt.Tx) error { - p, _ := tx.Page(0) - assert(t, p != nil, "") - equals(t, "meta", p.Type) - - p, _ = tx.Page(1) - assert(t, p != nil, "") - equals(t, "meta", p.Type) - - p, _ = tx.Page(2) - assert(t, p != nil, "") - equals(t, "free", p.Type) - - p, _ = tx.Page(3) - assert(t, p != nil, "") - equals(t, "free", p.Type) - - p, _ = tx.Page(4) - assert(t, p != nil, "") - equals(t, "leaf", p.Type) - - p, _ = tx.Page(5) - assert(t, p != nil, "") - equals(t, "freelist", p.Type) - - p, _ = tx.Page(6) - assert(t, p == nil, "") - return nil - }) -} - -// Ensure that DB stats can be substracted from one another. -func TestDBStats_Sub(t *testing.T) { - var a, b bolt.Stats - a.TxStats.PageCount = 3 - a.FreePageN = 4 - b.TxStats.PageCount = 10 - b.FreePageN = 14 - diff := b.Sub(&a) - equals(t, 7, diff.TxStats.PageCount) - // free page stats are copied from the receiver and not subtracted - equals(t, 14, diff.FreePageN) -} - -func ExampleDB_Update() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Execute several commands within a write transaction. - err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }) - - // If our transactional block didn't return an error then our data is saved. - if err == nil { - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }) - } - - // Output: - // The value of 'foo' is: bar -} - -func ExampleDB_View() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Insert data into a bucket. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("people")) - b := tx.Bucket([]byte("people")) - b.Put([]byte("john"), []byte("doe")) - b.Put([]byte("susy"), []byte("que")) - return nil - }) - - // Access data from within a read-only transactional block. - db.View(func(tx *bolt.Tx) error { - v := tx.Bucket([]byte("people")).Get([]byte("john")) - fmt.Printf("John's last name is %s.\n", v) - return nil - }) - - // Output: - // John's last name is doe. -} - -func ExampleDB_Begin_ReadOnly() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - // Create several keys in a transaction. 
- tx, _ := db.Begin(true) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("john"), []byte("blue")) - b.Put([]byte("abby"), []byte("red")) - b.Put([]byte("zephyr"), []byte("purple")) - tx.Commit() - - // Iterate over the values in sorted key order. - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("%s likes %s\n", k, v) - } - tx.Rollback() - - // Output: - // abby likes red - // john likes blue - // zephyr likes purple -} - -// TestDB represents a wrapper around a Bolt DB to handle temporary file -// creation and automatic cleanup on close. -type TestDB struct { - *bolt.DB -} - -// NewTestDB returns a new instance of TestDB. -func NewTestDB() *TestDB { - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - panic("cannot open db: " + err.Error()) - } - return &TestDB{db} -} - -// MustView executes a read-only function. Panic on error. -func (db *TestDB) MustView(fn func(tx *bolt.Tx) error) { - if err := db.DB.View(func(tx *bolt.Tx) error { - return fn(tx) - }); err != nil { - panic(err.Error()) - } -} - -// MustUpdate executes a read-write function. Panic on error. -func (db *TestDB) MustUpdate(fn func(tx *bolt.Tx) error) { - if err := db.DB.View(func(tx *bolt.Tx) error { - return fn(tx) - }); err != nil { - panic(err.Error()) - } -} - -// MustCreateBucket creates a new bucket. Panic on error. -func (db *TestDB) MustCreateBucket(name []byte) { - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte(name)) - return err - }); err != nil { - panic(err.Error()) - } -} - -// Close closes the database and deletes the underlying file. -func (db *TestDB) Close() { - // Log statistics. - if *statsFlag { - db.PrintStats() - } - - // Check database consistency after every test. - db.MustCheck() - - // Close database and remove file. - defer os.Remove(db.Path()) - db.DB.Close() -} - -// PrintStats prints the database stats -func (db *TestDB) PrintStats() { - var stats = db.Stats() - fmt.Printf("[db] %-20s %-20s %-20s\n", - fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), - fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), - fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), - ) - fmt.Printf(" %-20s %-20s %-20s\n", - fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), - fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), - fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), - ) -} - -// MustCheck runs a consistency check on the database and panics if any errors are found. -func (db *TestDB) MustCheck() { - db.Update(func(tx *bolt.Tx) error { - // Collect all the errors. - var errors []error - for err := range tx.Check() { - errors = append(errors, err) - if len(errors) > 10 { - break - } - } - - // If errors occurred, copy the DB and print the errors. - if len(errors) > 0 { - var path = tempfile() - tx.CopyFile(path, 0600) - - // Print errors. - fmt.Print("\n\n") - fmt.Printf("consistency check failed (%d errors)\n", len(errors)) - for _, err := range errors { - fmt.Println(err) - } - fmt.Println("") - fmt.Println("db saved to:") - fmt.Println(path) - fmt.Print("\n\n") - os.Exit(-1) - } - - return nil - }) -} - -// CopyTempFile copies a database to a temporary file. 
-func (db *TestDB) CopyTempFile() { - path := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) }) - fmt.Println("db copied to: ", path) -} - -// tempfile returns a temporary file path. -func tempfile() string { - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - return f.Name() -} - -// mustContainKeys checks that a bucket contains a given set of keys. -func mustContainKeys(b *bolt.Bucket, m map[string]string) { - found := make(map[string]string) - b.ForEach(func(k, _ []byte) error { - found[string(k)] = "" - return nil - }) - - // Check for keys found in bucket that shouldn't be there. - var keys []string - for k, _ := range found { - if _, ok := m[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) - } - - // Check for keys not found in bucket that should be there. - for k, _ := range m { - if _, ok := found[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) - } -} - -func trunc(b []byte, length int) []byte { - if length < len(b) { - return b[:length] - } - return b -} - -func truncDuration(d time.Duration) string { - return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") -} - -func fileSize(path string) int64 { - fi, err := os.Stat(path) - if err != nil { - return 0 - } - return fi.Size() -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go b/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go deleted file mode 100644 index cc93784..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. 
- - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bolt diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go b/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go deleted file mode 100644 index 6883786..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go +++ /dev/null @@ -1,70 +0,0 @@ -package bolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when a data file is not a Bolt-formatted database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. 
- ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index 0161948..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,242 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) - } - - sort.Sort(m) - return pgids(f.ids).merge(m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. 
- ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. 
-func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go deleted file mode 100644 index 8caeab2..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "math/rand" - "reflect" - "sort" - "testing" - "unsafe" -) - -// Ensure that a page is added to a transaction's freelist. -func TestFreelist_free(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. -func TestFreelist_free_overflow(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) - } -} - -// Ensure that a transaction's free pages can be released. -func TestFreelist_release(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) - f.release(100) - f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. -func TestFreelist_allocate(t *testing.T) { - f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - if id := int(f.allocate(3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - if id := int(f.allocate(1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can deserialize from a freelist page. -func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 - - // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can serialize into a freelist page. 
-func TestFreelist_write(t *testing.T) { - // Create a freelist and write it to a page. - var buf [4096]byte - f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} - f.pending[100] = []pgid{28, 11} - f.pending[101] = []pgid{3} - p := (*page)(unsafe.Pointer(&buf[0])) - f.write(p) - - // Read the page back out. - f2 := newFreelist() - f2.read(p) - - // Ensure that the freelist is correct. - // All pages should be present and in reverse order. - if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { - t.Fatalf("exp=%v; got=%v", exp, f2.ids) - } -} - -func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } -func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } -func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } -func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } - -func benchmark_FreelistRelease(b *testing.B, size int) { - ids := randomPgids(size) - pending := randomPgids(len(ids) / 400) - b.ResetTimer() - for i := 0; i < b.N; i++ { - f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} - f.release(1) - } -} - -func randomPgids(n int) []pgid { - rand.Seed(42) - pgids := make(pgids, n) - for i := range pgids { - pgids[i] = pgid(rand.Int63()) - } - sort.Sort(pgids) - return pgids -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node.go deleted file mode 100644 index c9fb21c..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/node.go +++ /dev/null @@ -1,636 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. 
-func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Loop over each item and write it to the page. 
- b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. - // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. 
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. 
- if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If target node has extra nodes then just move one over. - if target.numChildren() > target.minKeys() { - if useNextSibling { - // Reparent and move node. - if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - n.inodes = append(n.inodes, target.inodes[0]) - target.inodes = target.inodes[1:] - - // Update target key on parent. - target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0) - target.key = target.inodes[0].key - _assert(len(target.key) > 0, "rebalance(1): zero-length node key") - } else { - // Reparent and move node. - if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[1:], n.inodes) - n.inodes[0] = target.inodes[len(target.inodes)-1] - target.inodes = target.inodes[:len(target.inodes)-1] - } - - // Update parent key for node. - n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0) - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "rebalance(2): zero-length node key") - - return - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. 
-func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go deleted file mode 100644 index fa5d10f..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "testing" - "unsafe" -) - -// Ensure that a node can insert a key/value. -func TestNode_put(t *testing.T) { - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} - n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) - n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) - - if len(n.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if n.inodes[2].flags != uint32(leafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].flags) - } -} - -// Ensure that a node can deserialize from a leaf page. -func TestNode_read_LeafPage(t *testing.T) { - // Create a page. 
- var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = leafPageFlag - page.count = 2 - - // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 - nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) - nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 - nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 - - // Write data for the nodes at the end. - data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) - copy(data[:], []byte("barfooz")) - copy(data[7:], []byte("helloworldbye")) - - // Deserialize page into a leaf. - n := &node{} - n.read(page) - - // Check that there are two inodes with correct data. - if !n.isLeaf { - t.Fatal("expected leaf") - } - if len(n.inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can serialize into a leaf page. -func TestNode_write_LeafPage(t *testing.T) { - // Create a node. - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) - n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) - n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) - - // Write it to a page. - var buf [4096]byte - p := (*page)(unsafe.Pointer(&buf[0])) - n.write(p) - - // Read the page back in. - n2 := &node{} - n2.read(p) - - // Check that the two pages are the same. - if len(n2.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n2.inodes)) - } - if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can split into appropriate subgroups. -func TestNode_split(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split between 2 & 3. - n.split(100) - - var parent = n.parent - if len(parent.children) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children)) - } - if len(parent.children[0].inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) - } - if len(parent.children[1].inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) - } -} - -// Ensure that a page with the minimum number of inodes just returns a single node. -func TestNode_split_MinKeys(t *testing.T) { - // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(20) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} - -// Ensure that a node that has keys that all fit on a page just returns one leaf. -func TestNode_split_SinglePage(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(4096) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page.go deleted file mode 100644 index 818aa1b..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/page.go +++ /dev/null @@ -1,172 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. 
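// A minimal stand-alone sketch of the unsafe overlay trick the accessors
// above rely on: reinterpret a byte buffer as a header struct, then index a
// (nominally huge) element array laid out right behind it. The types here
// are illustrative, not bolt's:
//
//	type hdr struct{ flags, count uint16 } // 4 bytes
//	type elem struct{ pos, ksize uint32 }  // 8 bytes
//
//	var buf [4096]byte
//	h := (*hdr)(unsafe.Pointer(&buf[0]))
//	h.count = 1
//	elems := (*[0x7FFFFFF]elem)(unsafe.Pointer(&buf[unsafe.Sizeof(*h)]))
//	elems[0].ksize = 3 // writes straight into buf; nothing is copied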
-func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } else if len(b) == 0 { - return a - } - - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - merged = append(merged, follow...) - - return merged -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go deleted file mode 100644 index 59f4a30..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package bolt - -import ( - "reflect" - "sort" - "testing" - "testing/quick" -) - -// Ensure that the page type can be returned in human readable format. 
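// pgids.merge above computes a sorted union; the sort.Search lets it append
// whole runs from whichever list is ahead rather than comparing element by
// element. For reference, a plain element-at-a-time equivalent would be:
//
//	func merge(a, b []uint64) []uint64 {
//		out := make([]uint64, 0, len(a)+len(b))
//		for len(a) > 0 && len(b) > 0 {
//			if a[0] <= b[0] {
//				out = append(out, a[0])
//				a = a[1:]
//			} else {
//				out = append(out, b[0])
//				b = b[1:]
//			}
//		}
//		return append(append(out, a...), b...)
//	}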
-func TestPage_typ(t *testing.T) { - if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } -} - -// Ensure that the hexdump debugging function doesn't blow up. -func TestPage_dump(t *testing.T) { - (&page{id: 256}).hexdump(16) -} - -func TestPgids_merge(t *testing.T) { - a := pgids{4, 5, 6, 10, 11, 12, 13, 27} - b := pgids{1, 3, 8, 9, 25, 30} - c := a.merge(b) - if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } - - a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} - b = pgids{8, 9, 25, 30} - c = a.merge(b) - if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } -} - -func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b pgids) bool { - // Sort incoming lists. - sort.Sort(a) - sort.Sort(b) - - // Merge the two lists together. - got := a.merge(b) - - // The expected value should be the two lists combined and sorted. - exp := append(a, b...) - sort.Sort(exp) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) - return false - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go deleted file mode 100644 index 4da5817..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package bolt_test - -import ( - "bytes" - "flag" - "fmt" - "math/rand" - "os" - "reflect" - "testing/quick" - "time" -) - -// testing/quick defaults to 5 iterations and a random seed. -// You can override these settings from the command line: -// -// -quick.count The number of iterations to perform. -// -quick.seed The seed to use for randomizing. -// -quick.maxitems The maximum number of items to insert into a DB. -// -quick.maxksize The maximum size of a key. -// -quick.maxvsize The maximum size of a value. 
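// For example, a run overriding a few of these (values are illustrative):
//
//	go test -quick.count=100 -quick.seed=42 -quick.maxitems=500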
-// - -var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int - -func init() { - flag.IntVar(&qcount, "quick.count", 5, "") - flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") - flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") - flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") - flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") - flag.Parse() - fmt.Fprintln(os.Stderr, "seed:", qseed) - fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) -} - -func qconfig() *quick.Config { - return &quick.Config{ - MaxCount: qcount, - Rand: rand.New(rand.NewSource(int64(qseed))), - } -} - -type testdata []testdataitem - -func (t testdata) Len() int { return len(t) } -func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } - -func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { - n := rand.Intn(qmaxitems-1) + 1 - items := make(testdata, n) - for i := 0; i < n; i++ { - item := &items[i] - item.Key = randByteSlice(rand, 1, qmaxksize) - item.Value = randByteSlice(rand, 0, qmaxvsize) - } - return reflect.ValueOf(items) -} - -type revtestdata []testdataitem - -func (t revtestdata) Len() int { return len(t) } -func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } - -type testdataitem struct { - Key []byte - Value []byte -} - -func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { - n := rand.Intn(maxSize-minSize) + minSize - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go deleted file mode 100644 index ceb8bae..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go +++ /dev/null @@ -1,327 +0,0 @@ -package bolt_test - -import ( - "bytes" - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } -func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } -func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } - -func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } -func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } -func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } -func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } - -func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } -func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } -func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } - -func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } - -// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. -func testSimulate(t *testing.T, threadCount, parallelism int) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - rand.Seed(int64(qseed)) - - // A list of operations that readers and writers can perform. 
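	// Alongside these handlers the test keeps per-transaction-id snapshots
	// of an in-memory mirror: a writer copies the snapshot at tx.ID()-1,
	// mutates it, and publishes it under its own id on commit, while a
	// reader verifies against the snapshot at its tx.ID(). This mirrors
	// bolt's MVCC view so concurrent reads stay checkable.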
- var readerHandlers = []simulateHandler{simulateGetHandler} - var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} - - var versions = make(map[int]*QuickDB) - versions[1] = NewQuickDB() - - db := NewTestDB() - defer db.Close() - - var mutex sync.Mutex - - // Run n threads in parallel, each with their own operation. - var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers - - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } - - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() - - // Start transaction. - tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } - - // Obtain current state of the dataset. - mutex.Lock() - var qdb = versions[tx.ID()] - if writable { - qdb = versions[tx.ID()-1].Copy() - } - mutex.Unlock() - - // Make sure we commit/rollback the tx at the end and update the state. - if writable { - defer func() { - mutex.Lock() - versions[tx.ID()] = qdb - mutex.Unlock() - - ok(t, tx.Commit()) - }() - } else { - defer tx.Rollback() - } - - // Ignore operation if we don't have data yet. - if qdb == nil { - return - } - - // Execute handler. - handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) - - i++ - if i > threadCount { - break - } - } - - // Wait until all threads are done. - wg.Wait() -} - -type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) - -// Retrieves a key from the database and verifies that it is what is expected. -func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { - // Randomly retrieve an existing exist. - keys := qdb.Rand() - if len(keys) == 0 { - return - } - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) - } - - // Drill into nested buckets. - for _, key := range keys[1 : len(keys)-1] { - b = b.Bucket(key) - if b == nil { - panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) - } - } - - // Verify key/value on the final bucket. - expected := qdb.Get(keys) - actual := b.Get(keys[len(keys)-1]) - if !bytes.Equal(actual, expected) { - fmt.Println("=== EXPECTED ===") - fmt.Println(expected) - fmt.Println("=== ACTUAL ===") - fmt.Println(actual) - fmt.Println("=== END ===") - panic("value mismatch") - } -} - -// Inserts a key into the database. -func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { - var err error - keys, value := randKeys(), randValue() - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - b, err = tx.CreateBucket(keys[0]) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - - // Create nested buckets, if necessary. - for _, key := range keys[1 : len(keys)-1] { - child := b.Bucket(key) - if child != nil { - b = child - } else { - b, err = b.CreateBucket(key) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - } - - // Insert into database. - if err := b.Put(keys[len(keys)-1], value); err != nil { - panic("put: " + err.Error()) - } - - // Insert into in-memory database. - qdb.Put(keys, value) -} - -// QuickDB is an in-memory database that replicates the functionality of the -// Bolt DB type except that it is entirely in-memory. 
It is meant for testing -// that the Bolt database is consistent. -type QuickDB struct { - sync.RWMutex - m map[string]interface{} -} - -// NewQuickDB returns an instance of QuickDB. -func NewQuickDB() *QuickDB { - return &QuickDB{m: make(map[string]interface{})} -} - -// Get retrieves the value at a key path. -func (db *QuickDB) Get(keys [][]byte) []byte { - db.RLock() - defer db.RUnlock() - - m := db.m - for _, key := range keys[:len(keys)-1] { - value := m[string(key)] - if value == nil { - return nil - } - switch value := value.(type) { - case map[string]interface{}: - m = value - case []byte: - return nil - } - } - - // Only return if it's a simple value. - if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { - return value - } - return nil -} - -// Put inserts a value into a key path. -func (db *QuickDB) Put(keys [][]byte, value []byte) { - db.Lock() - defer db.Unlock() - - // Build buckets all the way down the key path. - m := db.m - for _, key := range keys[:len(keys)-1] { - if _, ok := m[string(key)].([]byte); ok { - return // Keypath intersects with a simple value. Do nothing. - } - - if m[string(key)] == nil { - m[string(key)] = make(map[string]interface{}) - } - m = m[string(key)].(map[string]interface{}) - } - - // Insert value into the last key. - m[string(keys[len(keys)-1])] = value -} - -// Rand returns a random key path that points to a simple value. -func (db *QuickDB) Rand() [][]byte { - db.RLock() - defer db.RUnlock() - if len(db.m) == 0 { - return nil - } - var keys [][]byte - db.rand(db.m, &keys) - return keys -} - -func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { - i, index := 0, rand.Intn(len(m)) - for k, v := range m { - if i == index { - *keys = append(*keys, []byte(k)) - if v, ok := v.(map[string]interface{}); ok { - db.rand(v, keys) - } - return - } - i++ - } - panic("quickdb rand: out-of-range") -} - -// Copy copies the entire database. -func (db *QuickDB) Copy() *QuickDB { - db.RLock() - defer db.RUnlock() - return &QuickDB{m: db.copy(db.m)} -} - -func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { - clone := make(map[string]interface{}, len(m)) - for k, v := range m { - switch v := v.(type) { - case map[string]interface{}: - clone[k] = db.copy(v) - default: - clone[k] = v - } - } - return clone -} - -func randKey() []byte { - var min, max = 1, 1024 - n := rand.Intn(max-min) + min - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} - -func randKeys() [][]byte { - var keys [][]byte - var count = rand.Intn(2) + 2 - for i := 0; i < count; i++ { - keys = append(keys, randKey()) - } - return keys -} - -func randValue() []byte { - n := rand.Intn(8192) - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go deleted file mode 100644 index 6b52b2c..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go +++ /dev/null @@ -1,611 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. 
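// A minimal sketch of manual transaction use (bucket name illustrative; note
// the commit/rollback requirement stated below):
//
//	tx, err := db.Begin(true)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback() // returns ErrTxClosed after a successful Commit; safe to ignore
//
//	if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
//		return err
//	}
//	return tx.Commit()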
-// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. 
-// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - if err, ok := <-tx.Check(); ok { - panic("check fail: " + err.Error()) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.rollback() - return nil -} - -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove writer lock. - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - tx.db = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() in -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. 
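// Running it under a read transaction therefore yields a consistent hot
// backup while the database stays live, e.g. (path illustrative):
//
//	err := db.View(func(tx *bolt.Tx) error {
//		return tx.CopyFile("/tmp/backup.db", 0600) // CopyFile drives WriteTo
//	})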
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader directly. - var f *os.File - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { - // Fallback to a regular open if that doesn't work. - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { - return 0, err - } - } - - // Copy the meta pages. - tx.db.metalock.Lock() - n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) - tx.db.metalock.Unlock() - if err != nil { - _ = f.Close() - return n, fmt.Errorf("meta copy: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - _ = f.Close() - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. - freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. 
- if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount++ - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize - offset := int64(p.id) * int64(tx.db.pageSize) - - // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) - for { - // Limit our write to our max allocation size. - sz := size - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - - // Write chunk to disk. - buf := ptr[:sz] - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Clear out page cache. - tx.pages = make(map[pgid]*page) - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary bufferred page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) - - // Execute function. - fn(p, depth) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. 
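// A typical use, inside a writable transaction (sketch):
//
//	info, err := tx.Page(3)
//	if err != nil {
//		return err
//	} else if info != nil { // nil info means the id is past the high water mark
//		fmt.Printf("page %d: type=%s count=%d overflow=%d\n",
//			info.ID, info.Type, info.Count, info.OverflowCount)
//	}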
-func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. -type TxStats struct { - // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated - - // Cursor statistics. - CursorCount int // number of cursors created - - // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences - - // Rebalance statistics. - Rebalance int // number of node rebalances - RebalanceTime time.Duration // total time spent rebalancing - - // Split/Spill statistics. - Split int // number of nodes split - Spill int // number of nodes spilled - SpillTime time.Duration // total time spent spilling - - // Write statistics. - Write int // number of writes performed - WriteTime time.Duration // total time spent writing to disk -} - -func (s *TxStats) add(other *TxStats) { - s.PageCount += other.PageCount - s.PageAlloc += other.PageAlloc - s.CursorCount += other.CursorCount - s.NodeCount += other.NodeCount - s.NodeDeref += other.NodeDeref - s.Rebalance += other.Rebalance - s.RebalanceTime += other.RebalanceTime - s.Split += other.Split - s.Spill += other.Spill - s.SpillTime += other.SpillTime - s.Write += other.Write - s.WriteTime += other.WriteTime -} - -// Sub calculates and returns the difference between two sets of transaction stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *TxStats) Sub(other *TxStats) TxStats { - var diff TxStats - diff.PageCount = s.PageCount - other.PageCount - diff.PageAlloc = s.PageAlloc - other.PageAlloc - diff.CursorCount = s.CursorCount - other.CursorCount - diff.NodeCount = s.NodeCount - other.NodeCount - diff.NodeDeref = s.NodeDeref - other.NodeDeref - diff.Rebalance = s.Rebalance - other.Rebalance - diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime - diff.Split = s.Split - other.Split - diff.Spill = s.Spill - other.Spill - diff.SpillTime = s.SpillTime - other.SpillTime - diff.Write = s.Write - other.Write - diff.WriteTime = s.WriteTime - other.WriteTime - return diff -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go deleted file mode 100644 index 6c8271a..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go +++ /dev/null @@ -1,456 +0,0 @@ -package bolt_test - -import ( - "errors" - "fmt" - "os" - "testing" - - "github.com/boltdb/bolt" -) - -// Ensure that committing a closed transaction returns an error. -func TestTx_Commit_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("foo")) - ok(t, tx.Commit()) - equals(t, tx.Commit(), bolt.ErrTxClosed) -} - -// Ensure that rolling back a closed transaction returns an error. 
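// TxStats.Sub above supports interval measurements; a sketch of the usual
// pattern, assuming db.Stats() (whose TxStats the close() path above feeds):
//
//	before := db.Stats().TxStats
//	// ... run a workload ...
//	after := db.Stats().TxStats
//	delta := after.Sub(&before)
//	fmt.Println("pages allocated:", delta.PageAlloc)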
-func TestTx_Rollback_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - ok(t, tx.Rollback()) - equals(t, tx.Rollback(), bolt.ErrTxClosed) -} - -// Ensure that committing a read-only transaction returns an error. -func TestTx_Commit_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(false) - equals(t, tx.Commit(), bolt.ErrTxNotWritable) -} - -// Ensure that a transaction can retrieve a cursor on the root bucket. -func TestTx_Cursor(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) - c := tx.Cursor() - - k, v := c.First() - equals(t, "widgets", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - equals(t, "woojits", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - return nil - }) -} - -// Ensure that creating a bucket with a read-only transaction returns an error. -func TestTx_CreateBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxNotWritable, err) - return nil - }) -} - -// Ensure that creating a bucket on a closed transaction returns an error. -func TestTx_CreateBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure that a Tx can retrieve a bucket. -func TestTx_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a Tx retrieving a non-existent key returns nil. -func TestTx_Get_Missing(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) - assert(t, value == nil, "") - return nil - }) -} - -// Ensure that a bucket can be created and retrieved. -func TestTx_CreateBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - return nil - }) - - // Read the bucket through a separate transaction. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a bucket can be created if it doesn't already exist. -func TestTx_CreateBucketIfNotExists(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - - b, err = tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - - b, err = tx.CreateBucketIfNotExists([]byte{}) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - - b, err = tx.CreateBucketIfNotExists(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - return nil - }) - - // Read the bucket through a separate transaction. 
- db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a bucket cannot be created twice. -func TestTx_CreateBucket_Exists(t *testing.T) { - db := NewTestDB() - defer db.Close() - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - return nil - }) - - // Create the same bucket again. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketExists, err) - return nil - }) -} - -// Ensure that a bucket is created with a non-blank name. -func TestTx_CreateBucket_NameRequired(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - return nil - }) -} - -// Ensure that a bucket can be deleted. -func TestTx_DeleteBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Create a bucket and add a value. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Delete the bucket and make sure we can't get the value. - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - assert(t, tx.Bucket([]byte("widgets")) == nil, "") - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - // Create the bucket again and make sure there's not a phantom value. - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - return nil - }) -} - -// Ensure that deleting a bucket on a closed transaction returns an error. -func TestTx_DeleteBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) -} - -// Ensure that deleting a bucket with a read-only transaction returns an error. -func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that nothing happens when deleting a bucket that doesn't exist. -func TestTx_DeleteBucket_NotFound(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) - return nil - }) -} - -// Ensure that no error is returned when a tx.ForEach function does not return -// an error. -func TestTx_ForEach_NoError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - - equals(t, nil, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return nil - })) - return nil - }) -} - -// Ensure that an error is returned when a tx.ForEach function returns an error. 
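// From the caller's side, the behavior these ForEach tests pin down looks
// like (sketch):
//
//	err := db.View(func(tx *bolt.Tx) error {
//		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
//			fmt.Printf("bucket %s\n", name)
//			return nil // any non-nil error stops iteration and is returned
//		})
//	})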
-func TestTx_ForEach_WithError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - - err := errors.New("foo") - equals(t, err, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return err - })) - return nil - }) -} - -// Ensure that Tx commit handlers are called after a transaction successfully commits. -func TestTx_OnCommit(t *testing.T) { - var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - equals(t, 3, x) -} - -// Ensure that Tx commit handlers are NOT called after a transaction rolls back. -func TestTx_OnCommit_Rollback(t *testing.T) { - var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - tx.CreateBucket([]byte("widgets")) - return errors.New("rollback this commit") - }) - equals(t, 0, x) -} - -// Ensure that the database can be copied to a file path. -func TestTx_CopyFile(t *testing.T) { - db := NewTestDB() - defer db.Close() - var dest = tempfile() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) - - db2, err := bolt.Open(dest, 0600, nil) - ok(t, err) - defer db2.Close() - - db2.View(func(tx *bolt.Tx) error { - equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) -} - -type failWriterError struct{} - -func (failWriterError) Error() string { - return "error injected for tests" -} - -type failWriter struct { - // fail after this many bytes - After int -} - -func (f *failWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > f.After { - n = f.After - err = failWriterError{} - } - f.After -= n - return n, err -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Meta(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) - equals(t, err.Error(), "meta copy: error injected for tests") -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Normal(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) - equals(t, err.Error(), "error injected for tests") -} - -func ExampleTx_Rollback() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket. 
- db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - // Set a value for a key. - db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - }) - - // Update the key but rollback the transaction so it never saves. - tx, _ := db.Begin(true) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("baz")) - tx.Rollback() - - // Ensure that our original value is still set. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' is still: %s\n", value) - return nil - }) - - // Output: - // The value for 'foo' is still: bar -} - -func ExampleTx_CopyFile() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket and a key. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Copy the database to another file. - toFile := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) }) - defer os.Remove(toFile) - - // Open the cloned database. - db2, _ := bolt.Open(toFile, 0666, nil) - defer db2.Close() - - // Ensure that the key exists in the copy. - db2.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' in the clone is: %s\n", value) - return nil - }) - - // Output: - // The value for 'foo' in the clone is: bar -} diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/decoder_test.go b/Godeps/_workspace/src/github.com/cupcake/rdb/decoder_test.go deleted file mode 100644 index 2cb4caf..0000000 --- a/Godeps/_workspace/src/github.com/cupcake/rdb/decoder_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package rdb_test - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/cupcake/rdb" - . "launchpad.net/gocheck" -) - -// Hook gocheck into the gotest runner. 
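// The suite below exercises rdb.Decode, which drives a caller-supplied
// decoder through callbacks (StartRDB, StartDatabase, Set, Hset, Sadd,
// Rpush, Zadd, the matching End* hooks, and EndRDB); the FakeRedis type
// further down implements that interface to rebuild each fixture dump in
// memory so it can be asserted against.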
-func Test(t *testing.T) { TestingT(t) } - -type DecoderSuite struct{} - -var _ = Suite(&DecoderSuite{}) - -func (s *DecoderSuite) TestEmptyRDB(c *C) { - r := decodeRDB("empty_database") - c.Assert(r.started, Equals, 1) - c.Assert(r.ended, Equals, 1) - c.Assert(len(r.dbs), Equals, 0) -} - -func (s *DecoderSuite) TestMultipleDatabases(c *C) { - r := decodeRDB("multiple_databases") - c.Assert(len(r.dbs), Equals, 2) - _, ok := r.dbs[1] - c.Assert(ok, Equals, false) - c.Assert(r.dbs[0]["key_in_zeroth_database"], Equals, "zero") - c.Assert(r.dbs[2]["key_in_second_database"], Equals, "second") -} - -func (s *DecoderSuite) TestExpiry(c *C) { - r := decodeRDB("keys_with_expiry") - c.Assert(r.expiries[0]["expires_ms_precision"], Equals, int64(1671963072573)) -} - -func (s *DecoderSuite) TestIntegerKeys(c *C) { - r := decodeRDB("integer_keys") - c.Assert(r.dbs[0]["125"], Equals, "Positive 8 bit integer") - c.Assert(r.dbs[0]["43947"], Equals, "Positive 16 bit integer") - c.Assert(r.dbs[0]["183358245"], Equals, "Positive 32 bit integer") - c.Assert(r.dbs[0]["-123"], Equals, "Negative 8 bit integer") - c.Assert(r.dbs[0]["-29477"], Equals, "Negative 16 bit integer") - c.Assert(r.dbs[0]["-183358245"], Equals, "Negative 32 bit integer") -} - -func (s *DecoderSuite) TestStringKeyWithCompression(c *C) { - r := decodeRDB("easily_compressible_string_key") - c.Assert(r.dbs[0][strings.Repeat("a", 200)], Equals, "Key that redis should compress easily") -} - -func (s *DecoderSuite) TestZipmapWithCompression(c *C) { - r := decodeRDB("zipmap_that_compresses_easily") - zm := r.dbs[0]["zipmap_compresses_easily"].(map[string]string) - c.Assert(zm["a"], Equals, "aa") - c.Assert(zm["aa"], Equals, "aaaa") - c.Assert(zm["aaaaa"], Equals, "aaaaaaaaaaaaaa") -} - -func (s *DecoderSuite) TestZipmap(c *C) { - r := decodeRDB("zipmap_that_doesnt_compress") - zm := r.dbs[0]["zimap_doesnt_compress"].(map[string]string) - c.Assert(zm["MKD1G6"], Equals, "2") - c.Assert(zm["YNNXK"], Equals, "F7TI") -} - -func (s *DecoderSuite) TestZipmapWitBigValues(c *C) { - r := decodeRDB("zipmap_with_big_values") - zm := r.dbs[0]["zipmap_with_big_values"].(map[string]string) - c.Assert(len(zm["253bytes"]), Equals, 253) - c.Assert(len(zm["254bytes"]), Equals, 254) - c.Assert(len(zm["255bytes"]), Equals, 255) - c.Assert(len(zm["300bytes"]), Equals, 300) - c.Assert(len(zm["20kbytes"]), Equals, 20000) -} - -func (s *DecoderSuite) TestHashZiplist(c *C) { - r := decodeRDB("hash_as_ziplist") - zm := r.dbs[0]["zipmap_compresses_easily"].(map[string]string) - c.Assert(zm["a"], Equals, "aa") - c.Assert(zm["aa"], Equals, "aaaa") - c.Assert(zm["aaaaa"], Equals, "aaaaaaaaaaaaaa") -} - -func (s *DecoderSuite) TestDictionary(c *C) { - r := decodeRDB("dictionary") - d := r.dbs[0]["force_dictionary"].(map[string]string) - c.Assert(len(d), Equals, 1000) - c.Assert(d["ZMU5WEJDG7KU89AOG5LJT6K7HMNB3DEI43M6EYTJ83VRJ6XNXQ"], Equals, "T63SOS8DQJF0Q0VJEZ0D1IQFCYTIPSBOUIAI9SB0OV57MQR1FI") - c.Assert(d["UHS5ESW4HLK8XOGTM39IK1SJEUGVV9WOPK6JYA5QBZSJU84491"], Equals, "6VULTCV52FXJ8MGVSFTZVAGK2JXZMGQ5F8OVJI0X6GEDDR27RZ") -} - -func (s *DecoderSuite) TestZiplistWithCompression(c *C) { - r := decodeRDB("ziplist_that_compresses_easily") - for i, length := range []int{6, 12, 18, 24, 30, 36} { - c.Assert(r.dbs[0]["ziplist_compresses_easily"].([]string)[i], Equals, strings.Repeat("a", length)) - } -} - -func (s *DecoderSuite) TestZiplist(c *C) { - r := decodeRDB("ziplist_that_doesnt_compress") - l := r.dbs[0]["ziplist_doesnt_compress"].([]string) - c.Assert(l[0], Equals, "aj2410") - 
c.Assert(l[1], Equals, "cc953a17a8e096e76a44169ad3f9ac87c5f8248a403274416179aa9fbd852344") -} - -func (s *DecoderSuite) TestZiplistWithInts(c *C) { - r := decodeRDB("ziplist_with_integers") - expected := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "-2", "13", "25", "-61", "63", "16380", "-16000", "65535", "-65523", "4194304", "9223372036854775807"} - for i, x := range expected { - c.Assert(r.dbs[0]["ziplist_with_integers"].([]string)[i], Equals, x) - } -} - -func (s *DecoderSuite) TestIntSet16(c *C) { - r := decodeRDB("intset_16") - for i, x := range []string{"32764", "32765", "32766"} { - c.Assert(r.dbs[0]["intset_16"].([]string)[i], Equals, x) - } -} - -func (s *DecoderSuite) TestIntSet32(c *C) { - r := decodeRDB("intset_32") - for i, x := range []string{"2147418108", "2147418109", "2147418110"} { - c.Assert(r.dbs[0]["intset_32"].([]string)[i], Equals, x) - } -} - -func (s *DecoderSuite) TestIntSet64(c *C) { - r := decodeRDB("intset_64") - for i, x := range []string{"9223090557583032316", "9223090557583032317", "9223090557583032318"} { - c.Assert(r.dbs[0]["intset_64"].([]string)[i], Equals, x) - } -} - -func (s *DecoderSuite) TestSet(c *C) { - r := decodeRDB("regular_set") - for i, x := range []string{"beta", "delta", "alpha", "phi", "gamma", "kappa"} { - c.Assert(r.dbs[0]["regular_set"].([]string)[i], Equals, x) - } -} - -func (s *DecoderSuite) TestZSetZiplist(c *C) { - r := decodeRDB("sorted_set_as_ziplist") - z := r.dbs[0]["sorted_set_as_ziplist"].(map[string]float64) - c.Assert(z["8b6ba6718a786daefa69438148361901"], Equals, float64(1)) - c.Assert(z["cb7a24bb7528f934b841b34c3a73e0c7"], Equals, float64(2.37)) - c.Assert(z["523af537946b79c4f8369ed39ba78605"], Equals, float64(3.423)) -} - -func (s *DecoderSuite) TestRDBv5(c *C) { - r := decodeRDB("rdb_version_5_with_checksum") - c.Assert(r.dbs[0]["abcd"], Equals, "efgh") - c.Assert(r.dbs[0]["foo"], Equals, "bar") - c.Assert(r.dbs[0]["bar"], Equals, "baz") - c.Assert(r.dbs[0]["abcdef"], Equals, "abcdef") - c.Assert(r.dbs[0]["longerstring"], Equals, "thisisalongerstring.idontknowwhatitmeans") -} - -func (s *DecoderSuite) TestDumpDecoder(c *C) { - r := &FakeRedis{} - err := rdb.DecodeDump([]byte("\u0000\xC0\n\u0006\u0000\xF8r?\xC5\xFB\xFB_("), 1, []byte("test"), 123, r) - if err != nil { - c.Error(err) - } - c.Assert(r.dbs[1]["test"], Equals, "10") -} - -func decodeRDB(name string) *FakeRedis { - r := &FakeRedis{} - f, err := os.Open("fixtures/" + name + ".rdb") - if err != nil { - panic(err) - } - err = rdb.Decode(f, r) - if err != nil { - panic(err) - } - return r -} - -type FakeRedis struct { - dbs map[int]map[string]interface{} - lengths map[int]map[string]int - expiries map[int]map[string]int64 - - cdb int - started int - ended int -} - -func (r *FakeRedis) setExpiry(key []byte, expiry int64) { - r.expiries[r.cdb][string(key)] = expiry -} - -func (r *FakeRedis) setLength(key []byte, length int64) { - r.lengths[r.cdb][string(key)] = int(length) -} - -func (r *FakeRedis) getLength(key []byte) int { - return int(r.lengths[r.cdb][string(key)]) -} - -func (r *FakeRedis) db() map[string]interface{} { - return r.dbs[r.cdb] -} - -func (r *FakeRedis) StartRDB() { - r.started++ - r.dbs = make(map[int]map[string]interface{}) - r.expiries = make(map[int]map[string]int64) - r.lengths = make(map[int]map[string]int) -} - -func (r *FakeRedis) StartDatabase(n int) { - r.dbs[n] = make(map[string]interface{}) - r.expiries[n] = make(map[string]int64) - r.lengths[n] = make(map[string]int) - r.cdb = n -} - -func (r 
*FakeRedis) Set(key, value []byte, expiry int64) { - r.setExpiry(key, expiry) - r.db()[string(key)] = string(value) -} - -func (r *FakeRedis) StartHash(key []byte, length, expiry int64) { - r.setExpiry(key, expiry) - r.setLength(key, length) - r.db()[string(key)] = make(map[string]string) -} - -func (r *FakeRedis) Hset(key, field, value []byte) { - r.db()[string(key)].(map[string]string)[string(field)] = string(value) -} - -func (r *FakeRedis) EndHash(key []byte) { - actual := len(r.db()[string(key)].(map[string]string)) - if actual != r.getLength(key) { - panic(fmt.Sprintf("wrong length for key %s got %d, expected %d", key, actual, r.getLength(key))) - } -} - -func (r *FakeRedis) StartSet(key []byte, cardinality, expiry int64) { - r.setExpiry(key, expiry) - r.setLength(key, cardinality) - r.db()[string(key)] = make([]string, 0, cardinality) -} - -func (r *FakeRedis) Sadd(key, member []byte) { - r.db()[string(key)] = append(r.db()[string(key)].([]string), string(member)) -} - -func (r *FakeRedis) EndSet(key []byte) { - actual := len(r.db()[string(key)].([]string)) - if actual != r.getLength(key) { - panic(fmt.Sprintf("wrong length for key %s got %d, expected %d", key, actual, r.getLength(key))) - } -} - -func (r *FakeRedis) StartList(key []byte, length, expiry int64) { - r.setExpiry(key, expiry) - r.setLength(key, length) - r.db()[string(key)] = make([]string, 0, length) -} - -func (r *FakeRedis) Rpush(key, value []byte) { - r.db()[string(key)] = append(r.db()[string(key)].([]string), string(value)) -} - -func (r *FakeRedis) EndList(key []byte) { - actual := len(r.db()[string(key)].([]string)) - if actual != r.getLength(key) { - panic(fmt.Sprintf("wrong length for key %s got %d, expected %d", key, actual, r.getLength(key))) - } -} - -func (r *FakeRedis) StartZSet(key []byte, cardinality, expiry int64) { - r.setExpiry(key, expiry) - r.setLength(key, cardinality) - r.db()[string(key)] = make(map[string]float64) -} - -func (r *FakeRedis) Zadd(key []byte, score float64, member []byte) { - r.db()[string(key)].(map[string]float64)[string(member)] = score -} - -func (r *FakeRedis) EndZSet(key []byte) { - actual := len(r.db()[string(key)].(map[string]float64)) - if actual != r.getLength(key) { - panic(fmt.Sprintf("wrong length for key %s got %d, expected %d", key, actual, r.getLength(key))) - } -} - -func (r *FakeRedis) EndDatabase(n int) { - if n != r.cdb { - panic(fmt.Sprintf("database end called with %d, expected %d", n, r.cdb)) - } -} - -func (r *FakeRedis) EndRDB() { - r.ended++ -} diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/encoder_test.go b/Godeps/_workspace/src/github.com/cupcake/rdb/encoder_test.go deleted file mode 100644 index 2203e3a..0000000 --- a/Godeps/_workspace/src/github.com/cupcake/rdb/encoder_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package rdb_test - -import ( - "bytes" - "encoding/base64" - - "github.com/cupcake/rdb" - . 
"launchpad.net/gocheck" -) - -type EncoderSuite struct{} - -var _ = Suite(&EncoderSuite{}) - -var stringEncodingTests = []struct { - str string - res string -}{ - {"0", "AMAABgAOrc/4DQU/mw=="}, - {"127", "AMB/BgCbWIOxpwH5hw=="}, - {"-128", "AMCABgAPi1rt2llnSg=="}, - {"128", "AMGAAAYAfZfbNeWad/Y="}, - {"-129", "AMF//wYAgY3qqKHVuBM="}, - {"32767", "AMH/fwYA37dfWuKh6bg="}, - {"-32768", "AMEAgAYAI61ux6buJl0="}, - {"-32768", "AMEAgAYAI61ux6buJl0="}, - {"2147483647", "AML///9/BgC6mY0eFXuRMg=="}, - {"-2147483648", "AMIAAACABgBRou++xgC9FA=="}, - {"a", "AAFhBgApE4cbemNBJw=="}, -} - -func (e *EncoderSuite) TestStringEncoding(c *C) { - buf := &bytes.Buffer{} - for _, t := range stringEncodingTests { - e := rdb.NewEncoder(buf) - e.EncodeType(rdb.TypeString) - e.EncodeString([]byte(t.str)) - e.EncodeDumpFooter() - expected, _ := base64.StdEncoding.DecodeString(t.res) - c.Assert(buf.Bytes(), DeepEquals, expected, Commentf("%s - expected: %x, actual: %x", t.str, expected, buf.Bytes())) - buf.Reset() - } -} diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/examples/diff.go b/Godeps/_workspace/src/github.com/cupcake/rdb/examples/diff.go deleted file mode 100644 index 4760769..0000000 --- a/Godeps/_workspace/src/github.com/cupcake/rdb/examples/diff.go +++ /dev/null @@ -1,65 +0,0 @@ -// This is a very basic example of a program that implements rdb.decoder and -// outputs a human readable diffable dump of the rdb file. -package main - -import ( - "fmt" - "os" - - "github.com/cupcake/rdb" - "github.com/cupcake/rdb/nopdecoder" -) - -type decoder struct { - db int - i int - nopdecoder.NopDecoder -} - -func (p *decoder) StartDatabase(n int) { - p.db = n -} - -func (p *decoder) Set(key, value []byte, expiry int64) { - fmt.Printf("db=%d %q -> %q\n", p.db, key, value) -} - -func (p *decoder) Hset(key, field, value []byte) { - fmt.Printf("db=%d %q . 
%q -> %q\n", p.db, key, field, value) -} - -func (p *decoder) Sadd(key, member []byte) { - fmt.Printf("db=%d %q { %q }\n", p.db, key, member) -} - -func (p *decoder) StartList(key []byte, length, expiry int64) { - p.i = 0 -} - -func (p *decoder) Rpush(key, value []byte) { - fmt.Printf("db=%d %q[%d] -> %q\n", p.db, key, p.i, value) - p.i++ -} - -func (p *decoder) StartZSet(key []byte, cardinality, expiry int64) { - p.i = 0 -} - -func (p *decoder) Zadd(key []byte, score float64, member []byte) { - fmt.Printf("db=%d %q[%d] -> {%q, score=%g}\n", p.db, key, p.i, member, score) - p.i++ -} - -func maybeFatal(err error) { - if err != nil { - fmt.Printf("Fatal error: %s\n", err) - os.Exit(1) - } -} - -func main() { - f, err := os.Open(os.Args[1]) - maybeFatal(err) - err = rdb.Decode(f, &decoder{}) - maybeFatal(err) -} diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/dictionary.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/dictionary.rdb deleted file mode 100644 index e0ef5289e4615c6b35e5fbe03ad99bb70a8aca08..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 102032 zcmW(-Nz$b}cJ;su@KR7mGXx2cKoUY7f{DXXc16^rDx$&@@7Sww_`LiF)#RU<-+Lh4 zd+up&T$M>(*RlWmmw)}Yzy9N&fBCO}|Ifeu<-h;-kH7!NKmSMmFZH87gE@JW+)&wS zojQBV=TZ+*->W(`(>{(>++iqD5aLGamWLk?Wz+Z(W!+p?;pTJvSU-lttXdAweiWBO zoBDHqw&>`5^_}0=aq@CuzMMh^y;?gSr}UlCv#_bMAh$y+b68sWNLN36d(@hStG~wD z7G{Ts*s>oBJMb~DNcG^Jh0HhWf?-WlLPgrH7N7p|zI)i5s5m{J34Mimrf9A!vPGKx z-1m2UUqa}0enQRWPWYQW&3o1Iy!C$Ykn6R%l}=;SZ6|u4cfwb?FLXF#cc*80$Q~md z$;$jX`aMozOR=2G_ub3rY8Qw8nC&xgP5HA#f7>?Wkq|9bH3WQ)d+wAiLrdYjHnx7F zacbK`DW`ECkUpGC~=ybnyPFwTvdpB|W*1O(EpZLzl<6}e3 zG{}8fALf^BCp4>Bw%uQRE_ImW)}ENIH16x0TBeKavgqnM_1v=iNv55QR$a7l`4dHl z`5}26Se#NLvRj8;p1I1_(d05Q!#&SQ_?;4RE%!-9=y>%V-saRzV`|s4nEtiHV%*5+ z6_s?wY|g7S)5*2y-=hvA+v%)Vn#NW-vPzt&TEx3|H@Sm*h5pv(KFe#VnXAWx9CUjm zqjJ`2a@l`#izDt4$=$jM*5tGr(t^;Dbo46yH%O7f%s#b^ad{nAQuyxs%CfoR!kyvk zvDx|9mURr@Lba&wxxHyGzv^}RzAWp~Wc}E^#(8~WrPsAjwl&*7=f25~KI6KiF7fSq z@?&Fd`5~OTZ98+~Y|k;sOBUU52<5vfrWpP5v9q;Jjed9NW7bPFLE+<%_-aR*Wln^A zOC9=gXzfd8qRf!W3mvp#cn@4&gqbQ*eq+jR(v$0q=i;->kJ+tN#DQ*X>*u9U+Y-gr z!o_2I9zkgrPWkkkbho_k_3=IT&w&Rv-1HKQ9@UcgOWeiRI2xH(@(xAnQaA~l&o2oh zfAW4kwE6VXVIE#SsNEK^Z*|P$`WjE(W%zmq&R1)Ua*RnpdEY5h?aJ5n@|`^KC2wRTZs*~LwJYa~gVbm! zjG0?6Yv(QJ1AXSj5q=!?UALX{Fg-WEe!bZ2_bjX2E-`rbuHQ7zrF&8LsBb8!?~*S+ zP3*gb6kW(WU5<#qb>sA1pW&98)CVVIkj}pJ+P1+Zc_PNWl9c=W#x2-w*R=6(Q|PRR z2sg7Mam^-=o;&(!Rjpf$7h)2(Cd*OPd(G^i_TE3a_N}Xr?5nyOV`|mR_B0(t7#Hq| zDfA6Dx^?W1y;v&mI>*X}$?RQF!+gY-=TmjJAil8eW?z8TixGI@w}~JZ0&UGS1WG02Y$|FTD$3CaY;q3 zCQVnGp5t}90Un@RJ1P6q3rW1j?R^L>nVy?oKU?FU=h-IEQW5ld$f2#ru}1c zbp0f*ZCR?-Q%(CYo@24nUYd*|>FF_Z#E%{ax(0rm*H%N8b5Pgr%Q#wctHWLsRpmT{ z>;(G8;*DVBSbIY+{*3o|*l%)Vc%Qm2ruG9_0w0#%^=k7czLA#|Xh)20GM>6)wcl5t zK}EhK`CW@7TW$+E&cj@Zecxizee3y|>S%l;5A9BcCNbk(^xaMq=Vx|3^RyhVwI7w+ zNZUa}li9t`hG#p`BFI>hu|u8)tvTAb69#V?xTFmwscd<|i9QwI8hvBG1!|uwH5XnJ z{l{WD>Ng6<+35H^sICOg?3B;E=|)w1Y`ZMIsq#(IY(?}@e%m3t*AeZ#D9vqg7`E3B z)51G}CKg#xc#gxYJV}c_baAxk;W!Q6tnmdeeJIdpsw< zUH`IGCzAyL2|?XDWWN%#e&m&H@}KZqs7q6+d#)ein3G4DANQqxX&5eQ#;scqG&iy_ zPRxiyU~#{Mw1Z0>+R(^iP~3!X#A73?+3AV8{(kgQQ$J;0lWo4#txIL^>NMo=sb&5S z@eL|nfFF!Y8cV-F5ftt!aZy^THLxaqd+Bd_UMM{|#B zUH1pK^Ld6DhnOyBO^S^3R^)hZ_ybpee&x&TAmeFGbU^q~;R*`j*t~VmNqs_JxWo2? 
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/dictionary.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/dictionary.rdb
deleted file mode 100644
index e0ef5289e4615c6b35e5fbe03ad99bb70a8aca08..0000000000000000000000000000000000000000
GIT binary patch
[base85 binary literal elided: dictionary.rdb, Bin 102032 -> 0 bytes]
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_32.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_32.rdb
deleted file mode 100644
index 39c9609714a856986e6766c8b5356a7663809085..0000000000000000000000000000000000000000
GIT binary patch
[base85 binary literal elided: intset_32.rdb, Bin 44 -> 0 bytes]
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_64.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/intset_64.rdb
deleted file mode 100644
index 70f56c05c4dd0810e86c71b1fc25268cc1bd472c..0000000000000000000000000000000000000000
GIT binary patch
[base85 binary literal elided: intset_64.rdb, Bin 56 -> 0 bytes]
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/keys_with_expiry.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/keys_with_expiry.rdb
deleted file mode 100644
index 6414be592ac57c0dea321d405e281fe5acbbf677..0000000000000000000000000000000000000000
GIT binary patch
[base85 binary literal elided: keys_with_expiry.rdb, Bin 71 -> 0 bytes]
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/linkedlist.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/linkedlist.rdb
deleted file mode 100644
index 70cd20f5df12ecd42c809f00f26ffd8e7f6eaa2f..0000000000000000000000000000000000000000
GIT binary patch
[base85 binary literal elided: linkedlist.rdb, Bin 51032 -> 0 bytes]
zJClE6+pGJT3;weM(b_T9S(Ye5RdPW_K+2VE@7E1?+dV0?Z~OI|9Ez?QpR7oJ3ndg- z;S;YGLC4E8&N(gYXybIQ-`3odqdVVHb#pbOH#|h1Ho3QRvP99WO9r7mh4WJ5k$vq_ zJrlwg42+*Q*_PAfo_a-fLTJ0c3;F@K4iI75%sq6t=Mg*xnEQB7$b6&B`X830qO^LL}HvBosqpm6VP`1Pf_ zOJ)n>evakpnfD|t!tj(8nt&Oq;>S>!zPqdK&Si3+htau47^2SCY7G~4Kub71QCYrT zYRj8PG0fLK{2tO}X-#K)3Tkz;5Aar^QsrXd7o$x6C{8Fkq3t`FM>Kltk8L@c*LEgS zXnmQu+R52|PZTA2amCQ}zvGVSNf1*5!wd>}_o{a0i@CP4XEi_B67^BuHT9$glGaa$ zeuS0)Cj)Z?JbY}`-|gNSArF{5g&MZPM*2^IDzh1&4vwpVa9e@dK{R&$u6(tWmUg+i zYJipZkd@yyjs0q+c6WkYM$b}W=?CCqLkpIAS5gt2dc!gpUbK+Co-+{JyZ-7)Q$O=v*0`03a-SC(Owcfapu z+pL&pcvMYa*)4G|HU+sg3n7=8 zsYR90|CmAizIX44jMHUXEuU^}GpTkTB8<)_`Y)S4-b*RrO_3q*GGEH(opSn}x%X#) z4>+?{f4wS?G2Yh{{k3eU*>XRF?IQ%;uv=@h+wYwgH5d;bG3Yu2m1$WZ>R5#=>;v#} zcMmvy7Intd%t$BD)_+Ce8FQq6W`pBh`vFm_UoVyNv09|er;>@fL?Vs}Nx5@iC z!K>xu^jOemt4_p|mpH}T6?dT99?x0iHv@WH7Sq+kJ3;cxOcs7bx_!&@IK-SH$+kM6fe3N!pHRZsq)Rtv zuKmI+a83~lwhbVW+!xzm6I%Xai={1#tX-hzhDlEK_L;R{JnAIs{UjfSp1Jo(xAndp zlUTcFF-^|bMxj*euOH%UkRA4sWyQnvU`I)#?z#wAr0#J7A%l63q6MX%Ww?-Ts$Aw{ z1UxgWT_QL&*S;4sfR$mqS>FXUEnj0ubvRtA0p6N^>xD*%^Sx|Vk(b&)<{PoZ4}mhQ z)xPyQecm+KJ$isj7S&;}7j^vxhVcyPV6&7X32xPdYp8E!+_98=%<}d<4tYdT&U3bU z#-XWf zpFoy=p0>|0bZQ@V<%yIrO;w zM%zxiUjxUd|5Q~xRr=a?DN82IqndwQz&JIh-&ND{P4M5xvc7uQCNA#z(tWez z*q}{&zpSbm7mRK5GGo%ot|v{MT)R6jx@9d!hZhRF)g6JKwHz?Vxr3=FtwwU>c2KI= zr)_3@HnSVk03KON1VWx^Qq>dW)tPp96%GTOU1_IaFc7t!Jdj7WO6tz{TM}G=MgkIt z{h$%r_Y?a#d^JU!<_%+#W~#V(z9~^*`>am$Cb|diu5Sv(p>w!xvS;y+hoQlHqvmY4 zNR#hk95dTqEg3X)U&}n=(_EK&YN@J+Van=!Q%O1VD8*JBnwWG@W2IK;XL1*Lw&nHrR9_aF~Mt8vZTU4R)M*_kfP zR~4Eq^okL5Ia3nl`O2@aR14_`U0LN&o=L#}*x7xc8c`-wWMhL|jk2|(STQxzHXh(a z?TZz2$!;)YsXP<$=zij3c-o^nw5r11@6gkT=_pq1c{j(wHNhwC2g~Fus}*(6of1!U zr^K0|zRg8y)o$nfnOldYw1J7kL-SL2QExQE@?OJ(ASp~U-OB1w6}lWYv^|RPIo)r0 z`%6&~my;6(k>d-XU*k}W=OD4*R1TV}#1gZ%cvdsaLd9Gj$=wg2tf-G_2&c}&yR>K) zD?aBhLoz7dtZt%$P($>(Z+>>;EMUrbnd0&WgXe{1r~i>uDzCF@&hE`_I$N1I7+IH{ zj~qK&w~=WTu53`e)8^fdu8=E(*0NTYAC{w=e*?~2$EWMyMD%zsG*vj43A^ziyeNIc076bZu@Fm(-7U?a%9Q1m> z>6i*I29CRIMZOhcAEvQ7TYLr)33#6Y(Yj)KOobS@J@_<{Z%(iPN~VEP3C&X@jAF*^ zV~(6S=a`gc8P2>v2kWz;I??6c)<_J>-B^s_Zq3@9c$A83u3(WNOcN%Z|5$wvm_fgbVHI-^|Gy#}_<)H9kXy3@ssz2rG`deQ0a#W8iveChCx z9z7~Q{f+Aa-$~=Pu(zE7+FkIIn3;zh!{?sT`~|GT@LA{t-rqugp@EMYcNPGIde+lNz2+_c3YZbi!c*t z{G}1#w(&cY5f~g86;-ozaL=#~Rf&AON-g7YcRf_AdYLH)-txp;r6ELp!A(+Q%(kLD zme!zW_onFOV6Cd(TBusNZ=(jkcRson=<}-K!;{(_8qWbD7Pl*42*JC5-RsA%$hYNNUGvjkur^rNRhWkT2TK&kJK)9E+Ot+8IIzs=b~G;(9A84_3jfuPfnOW%u&1# z?0r54r2s?IIUhSt`-Rq(Hg}t*EAm$iduR_FiOU?WTATgJoNkjW4vY`BHf7S_c;EqE z?whFuUsR4Ox|sZ)M$Rcgk+}iw*kLcO3Kz8k{`;^EQCEm-Peb65BV!=y8g&DE7P#tDI` zyqs33wZb`s=hXBVP?|#uJPX&8iQy8!$Cp70m)t%G`qBm;X$Vaqviy8)7(bQ9y&zwR zkgd$B;_bzAm57@7nt|Q^8J7xGUIYyj~Si$=YV;q>00>~R>V8P3X-(-T%0bVs6 za>7B=m(3co**_cOPlb18oNz%GKqRvw>wo3hbXV1rPLU`a&XswO_l+@|#KLIYBZ_CzqV1} z0>G7U_DD!*7#vaI*d9dRKnpzvo+gM@3xL_Yu%rspmq41Z>?Jr+|M zUc26LlLo{5JwOz@kJ1BvIytxouNk9tvziMxs2%a@1bd-(y$5${JaC>(*-vW(f8BQ> z6UL2SmwWIvm$49iUHo{218>q~;EiOaxb-+$3(To^QQ{hnI7WZd*jMne@_vGimcz4c zk}#FzlE?#h(Y9tI(@B%2D{l^bWi{u%b5CrxB6hVe)qr?V6J)W!f?49{bi@j7tbs{w z_`wFt&qw>MS*XkI9L`j3X2ktV-UsBdU~OvO2s&iO3H_O4CdjuhH%npdM_5~$%r z1ADjqna-pJ2@PpXe^0eaP&}#1+Hi&{d++*f^> zPv7@)Io?2!}9Lub)3S~Zu7n1(PkP<^e7Q6m?B+zBh+Wfs z#Rc5M4uJO^L`3<^Y4;}2uN7*gfU|Ri9R2sL1rK9)&HR(hzi`@iaI50ZU==U3N1EV1;hM=*%pTbekjXinIXoN z_m*Sy8}8r?GB7rVz3A>435WNbcW4Ve*|hetcb?H+q1t|x9}dX9$>ZL@0?Yv(iQ`~A z+);;S+ttlde|LwahQDYWT-XD3w;?F)zE}=B&ZGc`eS@r=R46{Qza48Hj8frVyz<4G zw(6tlF>{d`kh3Em1=w1uN~-LJ-FHLQIDre^*gKOQ#8QQ2Q(xoI3ZuyWV7Ymt(PU?W zLaVv+DkQ=Po)WS_9=|sh8bQFN$DL`@xpCF%qg<{pe7J$(*?2%NmFktmr5@-e=LSoo 
z#tv%>>&1dMHROm->+dL((HzfYv9od$@E>;qIw&81M&~0nWfmXo-fb_Kka-Rz#rBNT zP`3%SOf4B^>$}6GnuwF|_Mg4k)kU)Q7-r&)K4Uz@{SVm&iBKVma{Vu8{1Qd!1lHP_ z3gax?uby>pp)1hx4UKpbR%bZgRvOaNsI41&yH-cvq+Zi2dE@Pgkza$P`7|+LRN~RA z1I|5zlw%zHHA$yYEW24J+0x22@!}IlUWiC{u2ONua6CRTsq|r3z_Et2F%CI?eB5ag zO_mR~J#*G#kUb6VKTLbJU$(QB{k<+lCyEAz?U)ON5?jt#O|a!4Qy~i}9c#BRTOsN3 zXdp_z*d-X5{=12-Rh?qW(NSVR%O-I|n4+%iiu=L0S&@_hv9H^SV*5jQ9?uHo5&=Ub zMft9%1kLec-bLywQ_-SEoj!-WZktwS&m=}!BRQ<4IM+@rAFjb&E*HtP3s+)GL!ykA z_Fix^{n+l^C&x>=@0?jsPAEO32AZiP4MgO(KEr@onmzckImLeLtUSicS4gbr=n796 zwEum}z;S&FY%5ed>@(9h@)*8*i-6#sLAG)k11m}hneAuCjTm_wSjR?4$88g@NNavS z3t+2UhN_m~?kCw^;%VqWeB>1s9}@~Qn>O*%aT_DOfbe85()o zmA`*xrFZ-dI>=zX#k zB$zg{9qKBgFm@2kkWFwH21$rkC`XP!#dse(3MWo8iQ0D+NAbiC%LpL}hyuLp>$bSO zDm$MJ?CZZ#yy)8T35xia$+~i{R_zqp4R5$B0Qkzn15Yvub^9%{k%YUv3OQ)h3d{5nI_w8)_YH-bn8d}q)=D4)}slEcz?d<%s!=6YbK@4_@KiqbhC)-XIQ~zR(%p&KuVKw z3(^_#b`jmVz{dr?Q(gBL-xFs9isg~l0p&OZZVj-YmgtB@_XcSrVhTe zklAaC6hg6;1N}=qob*Nl!Z74262*0urv+>8;OV*p=ds?&o+R}wPzLbo&h&vK_$<}7 zpO{Y+Ii12YeY4zP;FNN*3RjT9ZINAEm1;A8$ZfbjmDHr6nxB%ThF;BigOQ{ZV!p{7 zRJVD`z0R8ReNif>O#U)QL}oSv&u^&^1!GN?o;H+%0I{0K4094~+$T?Pbnt_q^Frui|G1TZ0;groZKJ&tjD$pTd2)$w3cK_X0fP|gbgN!^A07w#@dXdh5`i)CCl zL=12Te%08_2Zs{_4^^y&NT_8MN?cR8=kaod35#L16c0&~CLdo?R)n~Y0_HNd9=}nu zbvMN}3(QFZ7btSsQ_my*hNWi=i(5HvM_PUkL5-<}CtiOgxkTAu);s7q$?aKxVdsNV z@$X!pmP?k2j(m{v8G+jA+zVk3PiBOaR#=ECB_u?E3qkbB5&$d7LCkedO`S^IHC6ev z`_xYg`Ke#Y=!v9HVcs^7bj#{(ewxkU+NT`ay}Ixc=lEG>Qn34rr*o`7U_$p;asws0 zbn$E770&F70r4{MSsA&B6Lf84iHygOoIU+=VdPz~!ea4jfA;isyuL9^oP%Z>eeW=I zL(N$n2kc_N)JnQ}=@K54zC4jUf25P_OSI_BvFW4Aar%Ue{%=UZ76TmY@N_QIzHWWU zh)${8G|bqyYKjMc!vzU&=HxCm2{|Lw5RH>2t8DMFZ<{prYjBx-^TemKEU-`l-lXgs ze$$uT$UhjL=SCiUhAKWAUyuarAu<99@O*?#9ba9da4JER|WdqmZd z?HxNQbYByHGdIRt(lSmI1hX0}2EQ=s_g&!>zc36lU>6HEAQ(7Zit9^c4@bHzGBf84 z0(EoYZ@J;jEGY2ifZU&A?8FUiP*}oeXKzht8oJme?so%V4C}f;+lX+|oQ@92=WbM( z3-@O21K%{++&i^l2*D2a)fx6K?GPA`qH-R$KnklVR}?_(P5eyzX@4}AH~@<>zqbXG z)?{Y^|E;r~xMh1yZW%8h)vTu9`_h`ev^GHZW)*(Q0x@ zefBmRJ{z>^(yjPrUO_~Ntqyp$XP8c*G(HFz}{kJR#U> zB^|jd7ic6h73{e9tvs06Gjc<@bc60|k{%WWeEg z*^Vnn^6UEPk5wu|M$U$}$=xSIba`{a_zaMr~+|P8J7~&DEOn}3s zAu$q+V>VqVv5=D`vml3!@q1-Tm~cO-gy|?&tU(|?--73Uixt?pgS^-hh{w2J4`_c( zju6CnW8pPU&a~_V9e5uVk$pWbV~@`Q-r_iNr2mbCci4&H_CNl0J#E}75~;MtCa;?{ zN8f5@i%ffv{1X}vc(bi6{CH(oZk;c2kBP3U&qo$F;xh!1-VbPSq@{XB9Kr#)w70AA z(gu$zv1X>rgCpGqgOb&yh7F3MZ&W0NlC_)_*5CeDg!c-h~2zd${Z`yWo<17V0o zNagz6x z#fnl2%6MCF!zKlci5i+{+{Px|ut(X*8oI;E99!*@ctp!iJp{!6Bi2xb+ zKnvNM`irnQ0CmJY(Bz*kx^#Cd5H*c3#~59R9qVe*=aCl`CwmV5i5C`2l&xxOaTG;H z0}vu-EaSapWY1iodklpY;UQA41!V2~l+W>vBbx{CxP`)sd!9+wZ#Ptekk4Ypf|;Z$ z%lyrcVq&Ssu8HwI1`z6rC#xKb8;L0CYAm3vLea}i3$kVNkcpPpU=e0SFc=$Ip~N%l z18K8tUop3I7}0iQWV06uoW~?iL&1$ZZcC_e;IWTnDyU=_(Me*?hP@g-kThpQivp5V zs&B+|qdQ^EW<|}^wTESYm=EqXSf-QUHBpmM2P%DBn61~r;s{A^RX8}KbrPm7Y!OK> zEn+Q@&9`wbJwY~gVzxP?A7^#;O}v&db!lk&ejSV1PgqlMQS>x+;J|rkTwSOM7f%2U znj8>o3ka1pz-tM63ugqw-(sW9xO0Et-u1)Tb+w$3B`4iIU;OJiG_iMN?uO?!fNiWA zHx7+<7-|CSxVgCO@?gtPHy>B_cKGS&Qj`}k!;%l|iL!Z%(w{Z`wh|$%&*{X%iayB_ z2sunMzXSsZE_llB^USv`Wp<8(PU|q?GN))iM5h^K@7~wcp9~}1)Uw;lZJ_FlG3p~Z zg1fVPNw>{s_&S5dnIRUghg~3JMd~Ka*EyFC_s3UI#Ea_A6SHit!aK2jEZkGSEmM4J z`Pzpq5EI6h>?5|!6Sn&{@O3N03@RboVuG|WgE%njxt7fBVVcY$Qg>{#;I}p7FD?`$ zl9UY@&-#Y@{29ze7qD`v+)G)bplt>d8DwWJ@tErOEzS)p@MIv{)v_)L$j7yL+IO9f z3lj``$;f%(ZiLjpIhh|>QV}IZm9DUOB=pZ0;`W8_RJ{ssl&z@gx#n z^?*yq?~zq19&gLKH(hRi^RsR)$TuSw=qYadkpvzR)#La(Pj;|n zuZvl9Ds$ty;4=|7e#ZIx2ze{=|5M8W*z&xRGZ9N_SY5En2vhi1lyq_ZNMTi`1$*<@ zw}J?)2cMt%4w0dtr{9A3`+`U&25>X_fi}4-IQpLPKG2y-?QKLt>Sk4+ z0BB|;1Q27!-zX_W1kx36c09zT?pbYB*7A5{gG>0jPWb2nZ({utQi&D2C>V0s6sQs% 
z>7qY;SwU*g!Do>J1oYwz{_U>tWOd3CPGLH+THovj7!TEBjAL-u8L2_MS@;P8nG$Mg zHM6>-%ZRyU3l4_P4DIx6_Eb%ix`v%SHTl61_t0>sdJ>Lskw}a`@Ow<-Z8X*KB?w+G z=e@7xT-rl0C}~jOPLhsC>*NS?g+pN)TUJu_bfiQvY;e25#A_B*gayXyuE_^s!cF7P zgEYaV;_z;&C^>r^#w4CrlBR3==*h}&%L~5Q%czD4Y+amIt_gL)Os-2iy=w(E5@yo4 zKG_*HnxkQ8OyNT5{xN7vv$8GC(WMkTICxKJ1%cHmKH{rLC zoRJq?r45M|9Tu6Rw` zN66t63@EIhvRFm#Kaw8w)5Oztav{4T)-5qbw>90C?|7iPlgxUzGP!3Ojl5aplN$uP znjpKB6}Reub+Q?b*bZXUHa8p0P_STM*PRAppwxw5a2QT}aCSbR3q|1Kfe=}~JlUe^ z-e&A20BT6M6%bYC3j6QMek9f^RyZ{=H1M1oS^92i>!xGr1*++15IV@tgE1A-1(7N(92ubV8!fNnG9XGMKJn^3A*Py>E#u}r)0Mo7u)Scoy>Htq-#b*?B7 zeoDY5_O7@8t+~PykX0=D+@>dTAPa~`lEwsqI*Uz#JQ(4;ZcnZhcm4X`5=x%A#07TF zgf+j(kTEEky>hYKII}OdBo*%r8pBJ+7L1Iokb%MKGMx|s?^nD6?-3_Cey^X3i1k}d z_?M?VvI!7ZW2eldUJQpLPD{+Qw*oW@1+eWiGDmGCK2JFSYY~7+Fq&vUp7nQZXpke; zi}e?}s@W*Rbo(>FIBcU#4ZV5cNQrE+j^amc9hd@I3}H))-+5u3OT|Wx8yU9L>jU5^ z8E;>P<1LZ8?Vp}y4F%iHUMrzY8;HvMtil@Bg{a29e-r_pcma%JC@O`of&ScgHLDi| z3fgv@NI)1v604fTZ$>rPBQ6YOaU)KC0_u2!OL%+lCw5QH$c@~S3pOb1>dg4?hxy0= zy@H4;WDvH>8big~5*53!6Z1*r^U-4Q_^=VGoN)91;NnOT20O733bw3dMdK=EdakNn z9yJ^&d+l3Ra+por8e+&pgN+}T6RD{K+6}83J0H}s2AoGTk?)bCIwp>BUM;f;A@Xc7z-np#p zQ0GCgB-OR7F?`AN`G>u5hb3i+Sv+oifr>9~o-xb462xgHUE|%HSOmi&k&!^}<(OPy z;RtT8?61;z3N{u1v3!6y%6wk^UEV7)WAU(qqK)k@1DjRfO(5D80^Q>OOS-cJN3kmi z!&`zBgLp&&BoK&s5c}Up`=k73yWLfp65M-+h(DhEi)W0}lm7)RE~O=gr-Zn}6Ponx zQ%_Pe|2%siTHW6r^R0=+aE+7EIJg&tL zQ+!8`@EYo|vpFry`L<#W`$h{rH7@my^twn>6`5ZRvUY6HxZhysZ zg6uPxqUZ4)T{Sl+n%~A{R0jM3MCbA2{iWc38P%_77q8oL&~0T!7xU#6Bv+w6{{5am zf9hPW?e`L%(XQI~;JWpHO?cv(bO%=QbA49tSJqswS{U`Emx(`tVB7JWpT9F3(%kDj z-EXZ&_w#CYrGrTlM8MMhxSpsKO%QMxiBX!YeV~k)aAV9&+P3X(Tml|S&A4aWP|MkU z9uM-?_Ad#(N;_ub(dSGM&iMQ3NS=(~E4!;K)zH}MN5ua9jY`*{6!_CC+(kxnO1`i!>)9;GT zPa9^B6qOhgU?CAYI@ZduwU+MSqHQ2ang0MU=5N+m$SBNhB8elLv&Oo)ho`N;U|Zy3 z*~VMGnCuZfnXBAqF<@BPK-D%;HXXFD9p(?-T8?QG@yLRvFk}V4= zYn2^l`T0Z3X|Gjvdg71mWH_D1R82bq^N_MUr#!bm_UYuo4;`C6a;X7Q=UY9u>z})v zbnnD3i~u+XruqA~Js$g5dPq$j-PZgP7?XD}rboZDPovIPR+U~1yvMRPQvR&kopB=d zeYu@0;M>O5WBD_y=#g+x7Id8O;7t@$UiICe*FZ2biE98s-aeE%iD-cof8`g6<8x3# zW+<&j^&<^VaU%9kC__|p?FURCk1tx`Y_DAz*RE+zt_xqFp{2oTRG|-MznzCqEt-M3 z(j(_y;QYvw=ZDdF4EFO0i06jvtT?UJZ`S?6Q{}cP?ASTtp3E~d$K$)-=uvKc_2UH8 z;{72Ou&Z)gEs)qeC7jz7%&)Ebt3NIJ)wXr-Ka7Fs{BlCrTW5dqJ%hlzM<{p?gpQDv zC#t{cU*nFZI9dOmYn(mf&DO6$FJuNX!}*q2ujT|T6*CpvGd=AoLta7iLlX#p@nf}T zEk>A1GG&taM^>5sy%}6g-rQfmTe$Z|He~r_f9qeNc|kc(ZNYSAC_@XEAr+{gy~bnm zeK$c~mDruz$ZDcb#wCyPG1;pG%ZFWI|1_W?)H9KhOwPO%>$=S&IAN02p($CL(+ zLF%3C;zmOT{6+NqAKRYYi&Ug_Qlgs0tC(F;K5@ zyt0mUL^X^yCA39XAby}! 
z2cGKZuc&FDUYSbM>;H#IKnUf~8=4g6LTgF^!iZ;+XjbN0Skfk`?dkHxjAU39SyQO;iIlC}$XdxcL`x4$jgIUre=-+2gj^kZ_U6Ltz}{V``m z(6IAv<4{w6wdq6z40mtXy_}Trj2h4AOh8i~wXN*%j0cP4>+%R<(}%Bz0q_1~fMbE> zt!#>C3itE2SZZp^Qz&ds{3 zYG%bvyg1Tq7EgA&VANWJKG7o0Njv-^tI^lnl;~#6oxzf&{mpu-%Jk{S5dREcU5p0a z1yN56o)UpjYV7BFD;Q%Qd64ZvkRLFpAApfQvfQAG@w|RF+m>=)bTpJ@*Sm!Ar_(Y$ zhlS+-0agEn&)zooH03VCX=}>hsk;dS&Ifs7~Aq0gm8$&!bO!*-htpU9xe3wf_3Ehu>mv%gm%s)cx4>ZnEwmQm0{ zn~L;4-d_9%Yzf~dw(9pHx`932*yUoYrDqF3@~h)@x1q4>*wi5PJ$yax_eEFR{1Khy zpW^T3dEjMr&?!TBZRk<9DH;yN{4Mbr@Tg!CPan#Xsp>|!JJVl!Pn5+yTRs4E{p=%q z2Ji(|bLo@NFDRPdK+JklsZH4h(qsWY;n4kF{c;A8g(GS}7(!BHP7rcq^MK6orvK)b zFilba7ZPza^?d%3w=rs!%-`W-D+%(Ae^ zUcQoxt@7iRIC1E`mluKl$#BT|EpMmGJ z3S#8xKQCGF=d*zDJ(hJDPj%nBx(<@T>+R&F7^;J}ZNsXlU{6QK4>b?lmvi~^wSqb` zgSL{`;`x-h0Q<5ynp&^KtImT7aLTc5=$t>XW2Y^@WZWUi*t?pZ;O49a=FWi8oJ!k= zW)^8?nT>6JjCZ!F8{mSnw@j}a^3(5 ze{WN=1J0Dg^Hy;qd=v~7zIw`)uiypd@!Vken+rgT9X5w%s^+V4d^;JJ?@eBISQo`SU=5*$0T6<8Sh;Q&m8W?rJTI66{n#sxa{z5g(`F*5?16`b}*Nv}7T+cU} zW~(rG=70RG^xREA;kQ8;=pOMhZNA=@-=ed;^Vg{i*IWrb%+zUS(9ds|97N`CGe{&P zqS^2h-qmNm>c5{m<(l+Gj`N{xP&N`-Hk8@nbN;pMCw&xe^Z5COJf^YA-p_9zr{UuW zm@DwT4iq&{e9jSmmcl?8$Jw^IF6J&mxH4@-=RMY|$fV)V`|K(I(^ugFG|ZHYHv{ML2|ng5`J?nB#DT_Q z2jY`bBZld_gJ$`d7h6#`^3W~6hyJPXoIPseB|1?V5ACbzmFlp1-sC?ieQ+O5bP%6cYv$h+k}0`iaHLoeLY!`KdU2Nzr$lQUQ`@nMVbE& zr^a7yPsF=>;(E@%ek`}Ca2pTHSfNHT{A4f7cOW~W8F3U1waoiqUVHcLd@E?pk`aVb@V5L@lx0tPv3@G49VcVWt>o3?>Cc|A~z@sT@^*&*t zEl37*btK+Y;(_eQxFHQwqyvm-+<|$*SyHTK*>meV>d>%eV~gLtBJfUS`N&Lb-osls zktq%^ib-#eEfRCwY6ANZNf3A(QzCUNeA^mpu>ag|a)DSa5;nJU)x2k?3=358rT@7| z9=|*wylpv%muhPX*vB$ZbVH_Qt_m)5mC{3T-jErm6KBN%?*421Po>|C`kdOU4!=DT zYOwcE3|GWbH&$164QMP`UU5hh*_c5Kg2^#97ZV%=A^quU6a!@=kBd?xY){DNw#m}{vsm5{F^3L`9HYq(;d9i zA|PDPO#URS-2OCnX);W+_rzreg2I!vmSeF)`DhOs8UPDzRWxiy_&dRs#emFPDhk7# zB-j49_uhBwhMqH8GezD9lBj}R{>D-DJhPm>bQq5pNySBHyuTuf{YP`uN4!j*PdH>- znhkfAWp>q;Z$1^@NBqFYDPCa)tB2Y6C+`Pd(DsSGZwhPj-ZX3vL7xsqd%E)+tc~apFf5aSJ!%NNCFOOpiAMQ?*5Cd~p;7d}` z&#>1hD>W*?n0?%8f_CddNRJ`z?zB%1Y@ge>R&` z3ZGuofFX4DU1_R@JTQwOHXBeW?=~#^wcsQ1M{Jbu^rz+&fDsG>>}~?K@A3UT{+4r@ zf{46s$lb@sL{PC9GjMBok#{#x^E1bsh|Ltu$#b+4AjTX#XGKdbU{o-4XtH9!Qih$& z{`1JloBhz%bvMJyf`Vr1QSxA(30X<{wZC0Z zxEHLEl53k&5_KkUqtV6}S$zg8a40VEFWjr=~Ux{6CJB%mE!pkDM9U3aM{`W+C{>SC}Fm4?N1esU9 zasR!@&|7xqEj_TobfQ|AyHu@&0l>=!yNc1(yIU6x-#QG6=5m?3!RpFDr)Tc_%=mS16^}3BHD4b`?Q6x&cdk|m7zx(6>izM+q#vZKj3#AG#{@J!FDZ>b(XVS;-F+5dMVo0s}D55ATgVYN6dd(OutdX;dg zYVY)hcUbge?h*OJPYLO78R?V9pD-vlssZO{q<|Tw7NrRoA5~~KIT^P;m-lsvgVnMB^jZ*_I#^07YAY?>^ItAD=~4hrKLe=XU4CVKTDXM| zCV2irjv_CaUv+~)O>c7N*r^7V>W=XCWTmK2E6yv!aHSv5J=t4Wtf8|`}uVf7xX z8)y~nGfG~FWx+FRBoaDDIo(2n^TQ;fSa4k`n692UUny0-1C^~E{@?jM5DBu>jB1dt z2R47DAQB1ZCo-%BN>Q zEqmdpW^W~-nC+Uvn=YZh%oDn)sUrU9+V5ls_U+5c*{MT{#kTVFWCyb%SVG zIl1@`8_(pEzG2QR59;V3X@@Ex+6CLTq9YEU!chBY~LZ6=>ZS_FcFD1Wfdy zP#_l!c(?NGKFus#w#`uYWCq+;0 z6l`qyke#!EDq<1w8*>^joOxnUx}frRR3OP!$p}leN`OtqXvcGE7nzsTw*z&xG4rcQ z%nSBj+OUGMx*zXJ-Vr!JtQ_iq5dx$B+oyJ|=8{d~wJHl!pze3v_QA+lI?g3Gb`sli zyTo2F70-Q7bKB29sgjVm8o`?DS%959g#-osgq80#NBM(|nEorc9}SP@NJb3SO1=>~ z5`Mq2>xJrSwPx-uqM+0S7qGO$Z6i)%vuxVw)=&|MF3JWjHgz9x`6E2#Qt9QQW;Ld? 
zo*3I|slp8O@d`et}b&CzWK?5WXAI8bEd7eGvq~BkHCu;W7h9uDr9ssK$A~~ z>*sq7jxyN`-=?x-d}l)}jl9nvk{9}c2`h)V_$`^Ied&@b!!PBdm@nys!bX;?K=5+7s1-L=ZANiuPC#RMO%St^c z&yG;!26PY)!F|@-fBT%U+gN@>uwMZH6oIyMPx6=twCVwg3Iseb*Ig(?n!u6yYQR82; z@}I+Zg>9*QUB4YOi+AObjDmSxBTRgis>)t7Y72Z{SxRf2DXifedZi{hIVq( z*_85(1AG!fQB=W7$0yrO3}9!cVr@n9&3-ST(qKZ{wUA8q!vJNmWMC(jPoy-g1tI<7 zZ5u;VJj&`y)o!5-zU4X3fqD!dNi17v-8veTY+;L8TFr%q;!j#$9rc9C<(Xhwn)%HbNrn2xtK!$4t7d9TM16 zMktc_eWV4zJ}ea;8W1he^F4>7DX6M15tl5UW24Tt;#n9!U2%_Dog;|Bm`t39QiDzQ z)^(pl1KPCL`OGN9k8|jk7nh)-wD>?I?mjb>%D835nwC9Yc}O zx7l!-Z%B$FmqVaY5Os#~&U>m*{`$ne@I^M#yg0G6)#_-Lv3c6!K?zp(-cBBUL9{JFX;H@?=FJ*xQ100yrvT-n{1f%V2lZnzRhsVCSe;gbj)WBz^291=gPx20WIfd zKl9w?X`Df=THp0V?|OV+QTa9;ir0fZ`0rkYVJ}^C`7=@q=Qf!6D@HevJ^P2Ibf{j; z-(*3tSU1V^Jj=Hqr$fFYTmAXlE=bZPHC}&`PY5o3adimC!sf>biSttQm_s>PDzPjt zLOt?-43hGaBHC@@{VhkSN4Uu>xD<8!V;&CMA|wOEkjufO9-D90%rKFOV6B3qX_MYo zV4ez`e%P-1iv8A%8ll#28PdOE=*?&*#J}}%e+EwkkXi9O{%*a>C$xrhzx|?c9zT5M zJZ=Ib^D0~R$iOzTR4^dl8F$UNgbsaw&PNiNPq%T&mhZ_^uXX&Bwerw%L;br(Zlbb8CJf|RI)5n+K1RH zaKHAkaVpzD`St9TVtQz+{xW{6E3aX(K5Jbm>4Q8nJ9;$Q{q7hIyEl z(c@$j5mPwxpAhVv3msDro_G|OqWACnSYGiqJY3fnK>IZt=(}4A=PS=uy&DF@6%fgb znbNwr5Zn6(GIbLSvs*yOKp#gJOt0E3+M&MRO&%u*Gt1a|YP$>DjfxIcJ*E0Szf{sT6_%?FQs)nv`)`L|L|C~DHwF|pPORfhL3Nt4upmBQDAwvv^vOf~16s_B z&4%k$eVN^tb3@X*;ejv=P4(hF`6g4-@OnwhNk;* zr<@#Q3w;{|q)A?je8h69OVh9MSYs+k93(w8V2CjVOkSO9cws*(piXs z1h|YR?2|=j8%C1DXD~o}MR=Sy)thj2^K@gJ{*WKdqebH~iZlFf zR>jTan!;|3a}M@1+CW`QvnVnIm6E*{Db2e0B}tSQPlGjjaSFC+-f}#YDw_VqDJ^{b z3n%Ls@E~Z4$5C%q$N=N(#-0}7ZTr%)|7sl3^NPMTu!~n|t;>D@LVSFcuAhL=x&p=G~>br+$>$UR%DDN zW3#QGwXw*`dP0Y@m8vEX`uL&{5@SJmSCS;0Pb*;NPIg!XVxf^zSv3NJn@?b3_~7r4 z9!ziY;XD*9$~#Xwx8Yxz+-?jH$?(jAJBH~q7;el{&)v{s*{Pb#FgU7l zZHL#f+#JFXZ#BejZwzhY@rT6Bi)f`;rW-|MhoL;>x~Q`atD*nu1((0*8(iyclz@Vw zncJ>2uZ5`SL|~Az{`McUiGu)cGNu;^=FBdvwd1gXMDK@Gu!mU$mv$VizQ`OI6s!0Q zUl&WQEJto0+uykPN!wX?f7;5B+rI5BU!$lR|DY&1(8A|$*iuW?BJjU8G3&2crxY;| z({cX{62A^d)MCU}fNpaO`9DY!5+e7h(j}%ICds)Sd?j>#ZT(1@8?Mp~`9j#SM#%{z zNhk;AzFfqHYJwmC7C0s<;lPp<{Wa}WjL%?KF_ka*Dl8QV)%sJGAft7Fg#Qwox$-Zn zNb_ky$lT7gPfWChEqWDit<;!Do<=eAwM=48;4U_rP$BO(Um3c&8FDzM~oiM;4W_x~@KSOZXaAGw=J>DfI zwZat3y6HE#7B;W_ z6}Y6*P&i7K>wYZZ*^!hLNsOW?w`mhS1$0dIU5GS@Y&4}Cg-d8G7RghN=3iAr^Q8nm zavsOb0OcGdYOrUGMdSYTGWFY_rg*XicXXX|q};(!>||Y)Jx|D}RG_n2$6t~_ zxV}5DU`i%c%GEGiKi|0);Y5>3Wsp}7Z7z2`{ewlZ#28O-zqMWde&VQPqt%gVeXo_Z z^TfgWiEf31PF*MkiU~$IutF@0^=jNa?DD5=dD_np z{q0HEZBh9P%b)X%c3SdH<*FhNxLEY#X3TBOTU%ZINt9WD6x?Opz_-bo0ZIXJTMDmw z{3CRhVvF4Wy}jz6Ot+Lxnz0*5OeHlhjYyo-z_yF2lU7Tm@j>%|I}FU-rp*?Pj9D~S z2Eetx+qw(scg#~EAof@NxLe{({pf56?Ve*bU;91qG^4-4%nusM`aDd-z;OknkS9D0 z`CVwUba{cp;gq~7n72f*-3L+UFa5==c=YA>B3NOwq^9u<0uZG`4!>!L$x698awWNl zY8~-r467snAXhPgmg1lILm*vBQMST)AD?*Ec-^vu*z^^wt9@znq+)x@1vV`ypyCKY zIQa+3tA}#;4*HS8Yz6-3`tBwvyZE0QO8GIwv2c)KkkX7WRJyf)!#@{o9Y{*)&lUY3NBuAz0(QzdQag^p@f zqRGM2eSU-Rm@;^zR(x2=5`?=BkO?mH#T1e`f{J+N0?pVdW_T&%e1T}>I*PfCMVTZ( zX=J(Cd@u#TtjUouB8+#0I(z{n2KrFV@jdivlA}*DJ^tF;@|*9!UYL`wkURTk-2}2Q zc5%nRY*D43%OAqmhtRYjt7o>~)b7YEEJt^IP0rE3ZA?wuwj;V~18=u87(?(C_!t8Q}^KCWdi1HqkTRrwqRg>k8tTo}gM z%sgh&0r>odtUIp64tX=Fk=j-)ISZ!+_9lR7M5~(lx$)a6C7jp^&RG^iA;OppezQwoqA(Byx5cu& z-`cA!NZr!jzs!taCf@r*(Ut;hG%%|cHb#jVPN83g&e;R>cO{#M`F*_y6yT(2pZW9E z^2n~ur*7GB$%TeL)S@4w-u-uz&HAf+^SRL~XQv#*e0z~>zUmt-r-m;uThTlIy?)|e z^zo&p@e>xc6^~hu0hF$wqg-UiuUnq1rhe?81e0{SPf5@Xtm+u1r4&{e-58bF*;;Lg zE&m_F(S@w~vRv&Yc$Ya=PIi;ShwKr%UieYqUIm1i5=@0KZPJ;YMSYbymJsBnsTnLO zO}+=e)jZ(Q$DDMJiP8Rkm$3*!^&0LIVN&Kopw2+=X7_S>HsmV_IhdLgTpJ3sBZhy^ zx7+r>7+)HU+t={+&cYnW`pSp>ZgT5CyWaJWI?|I%r)N@VoerM+q`^Nv-~QlHymfts zhigb63mx0wcDO$&neS>ZcuYQln@F?oS%u>h5<{Qx8-LIA>c!q}t5=jd6m*^BI0+Dp 
zOv-w>e=RTlo~z{E&Y!awGk$^%H{#t}s)oVWSaNSRs=6wuq*N;mV^flY`EVoM0mil% zrkUx#?vZ9j`Ay+n4eBBOtj1r@m?7gBMD~DEsy$Y0D$E7a7tDNiHC~6HP{}UO8_o+O z%ZuOQ(dQOwY6mGjn6+T)%U?)sFv&su8?r}DtTIXCDnR_NU+f5%z9j~pEd`QkhO~(P zM0C$6&or~?-;7sd`y6d$W5|ik&qF-Sf84sgi1$T{_FL{c4?{K~GL>}QKjX9adfZ#N z-zy=_eX*)Q;(W)9e2c`f&C$hTgf~*oaaHECRoY%5{aP#!dEWD+2a9pvhvq$0>|2|} zVZ6OQCQy0(k;R=qDY=AFBU!mNEItMJh+x_}kqH1PbTd; z$AJ5^#Pi+M6||TvV7NriCrl)`n~zk~X61x`VyYychoFg@m^l?*NM3g84AH%Og3M^^ zEULsxJ1L(XfdBN2DPgQwsSL)MLmto8L!TiLM~ISunzhwo0bKM4`pY@!>d+Oatxk|l z>n{UJin;4K;Zpg$lxV*JVRde~jPrwR_-03Z`mtKW&GQpZhGwcjMnLUP#9Aa%j{@I~ zJi=0R01esWjciI!&n5Kew)>sLE8VTON;b3^x82t0xLQd3FkFOT!XS+P!p*z)DF#kK zfgsaVP~bBUVxJ1I;gv0szi-iBBH?h#-AqQn;Y~U_Pyf>_N8Gu26ctlz7tOFuVfU7K zfOd$U)2Dz}A>~Ez6n7is>NtXizNHbh?bn0|sYQK#Zz5`rHQtd8IP!9sSPRGDT>T0`A%DdwCl%?*FXn{ z73=12=VW3)E^0QqoZIBPBvlw{wr&Z!?9|s8Y;pdqD%3+>;W!*LsVq6l_Tdu{CPz{O z*0fYfFm|-o5x_WHXSdKe@HIn;AG~y~@b7KMe+k-?U?T(A+An!L;ZpgBOY=!C128VT zSSVPPO!O)q_lu&4y_=!}U-`xi5buYE!O2TNV-7wdXY}=4D-8MIfhOaX8{%I0`YdkW zuT?`(N?4?I5kk${$3|N9JYw+PM1REi{W(pZyKelN!P|NY(iT(jzL-=BgztfYUUz(f ztT5_`JJXu?n1SkCEe~B;^;I)F)XYOSlSj11%XECd)xXCpr&vs%`ZX=d>p2UnZ04|m zzM{O1tG9H_^ls_$cdjvqoZ^XlSEkZs6u0t}(6^qLFDk~KHG!Jydj&*8onGI|o#QsP zE0ug#YkrHCXdh!-1gkw85WN^bi>usP3Q~=1ark@eXLksOf1f?sOMB4)5UF9s-gKBk z#L%hDaP5WfYOgn%Rr!Y%CXzj0EClc3U&QZVC~?;Zu}y>+UVFfI^IU!{GSn9-eUNu> z!zbt$WF(_p!6~C7V;23Gt#r2lKYF+RlFS{RhH-9bs^dx6i|Rd}xSV?gH1BHDBb$+EPA|EhyG+CS)I1wwPvWSQ%n#)3yF; zpS9-|cnjqUa2|&c!xEVOgA_;v#=-oT-Zfw(+5X#S-}XBrS6S}a_nkf-`@*TmW%nqf zsigZ9zIKSc>*A^i^_57dJ5}&qwe~{tOYV9nA`H>LCS|Zq3sq%IaeM!hXHj+4!=Xw5 z7$U;-Qt^HeI-eD3=WibdW|M6UkhYKDsj06jz{`)e$4UFxHjzXh&KAlVjC+uz5yyFdXFw$*JoVRdREPrzthr2p9w;=QZN4UG z15XLvg*NWwuh)x35Mvs1OEky}^Crh=vA*D4{@wvN=KLMT;DL|b6F@ZRtB zRfOeM$Bg*%A$FZOh>GZt9VUGDZlG{X zfAnlcI)Rdrg_=4^xTOygsV%GMaJ;=_uRU%=Rq!8#5JCD6leA_ab_(X4y|B(MM#J4C z{ixI6x1nhdXD-Ly>hAGwn61Y}U+Loh{yGg3os!;?vVfg*P9=UyTA}UdTB`qW$bjyb zs|m2VPR^8JV#t}Bsyp|eohm76aGnIk!jXMwQQ1^&-c*nBKAaE9pTc2nx9=>zYjx07 z3*a3fc@R)Irgit^%IrI)S&5+F2-Zm_g=91^-s^#G7gXW>GCh%7J-j%M`q4JM=}h#P zjgIo!;^^p&-oJ>-_y#X$_TpC+Eb$wX;4Qt^flFplCV&?Ak?Z2 zA%US|W*F404ak|?t6>~~ zuLYL@?a1vq#0B1FPqG2k?iW?B-qAb4B5=4U`P=j$@`=>SGMxRgUC_&G- z(vwwiVcFb$Ig69WzO8MyM`-+y+cL+Jl`0_gx2QOp&U?c`U{S@`J6Gi9E zdgJo_eSFNzgrkuln7eIoau;vJ>%HFJf|qJjr`0bFj`R{~k;V82Dc6ngx_-jFTexV< zS54~PcpLi_JNczolH75jp4@$WoNE~`OsR_!HJ?q|^&uX&G zVMxmjTWgEzcg%K|J>l>NvsL4>PYt)+K{oyFTNMkWvw%5SdixmRK={CoowU!V8 zW;6Qr>a?vIXUpwcNbK*IiwetEVKcIjtfL;kkRPs@H3FB#nyjFDX#9m%lkIwwt01$x zw_lTCN5&YGDk8f0TO{5TJp#Wgh_UdxIr#33FSMdjJ)8)buMj!ZT^T+^k_|paHzWV! zzM;-l1`jXVMZ1pM{>&zsYS>`Hi?3|&yJJnqSl8>~u$C=S#GryTctA4WWbItGx95$T zkFdBNn4cC9~`y+JB0b^sO|FR~y%ZZ%lH{BRy9(zu_BPDJQdS6h=-UqZiJGWIk1Wn-rY3TX}Q z5OyJ%Rth92AfD;_cfZ46CXf3@viOsL!uD^pvyU56&V&u}Xk)k9CrwU7EDnm5A`{j! z!kc67dO^Hq^(UMKX+dkSDxj}4zdS}En;R_hC?lPX7Hg07Ab4J;Lyi^S?fw;tNOzDw z?_2lHziTo-66U7aG~jT4PKW~qGl)yW%ez<<(~>m;OT>K9d3(afmy%2Vpn7JSuP_jk zy?c6bRIBVgtq`(D*fu8%$5X6Lu6?GJD76;Z)0PQ02|PiR$*mv&LQXnbtn^mWZYfz{ z8PW3u#Wtgy-6F! 
z)1VVt>TcLiNP-5ghdp0hL|ZWX-F~XMR>*ZTTnPe9q9&`~G3tiD*#p4G#b8#_ z9augyQ{*s~QCs~!!ENYeM55TzdHpf$MZ22v43td|6%<^|wV$*c!n+id5b_uiMTO_& z)JFwL+=TQp=9cd$il;w*Br7Egx$3o2X*G_9;@Ah`6$xB7S{Of_=UJ=5H`%O-h$f|K|Qo%xLug~p^SFOAbf-r{mlyYPEb*{!BMFT=W@ zz2*BuaIxmQlx@!WCf@M{D776V*?uSp zM~2q!W<{0N#dE($q%rJN5EE-D#9}MU#j@JADZp~ZF%#!T*UhFjgwN{P!l4)4#1CBe zRBDrGr=E0` z`2P6|jbhFan;#!MFw0@?(#Zm={Smkq$Kog`00b~$@w~M>eKr%TMBNBb)-%FQvP7t1 z0OlYc9&MsTe0Dzj+wZsjtqmbKXdL~~@IwjuQlUu*C(4#Zry}eWz*rZmW&?olZh}Rn z!<_2sP*$T^X8=+bv1)b0Ep%id zb{L5A@#prV5?2Q!(Mu^87rFuZZUrM!VfA&tpppcg&o$ZR?P9-fnM52)ZuZS zk3kmXDJ-a)-z#11&_#3k^TIqx7>rIE;+&r}+&PnrJABX$N%z~`mH{WdG;Aq|C$0ec zl)Gu$k0LPq=@fqLq$)iAD7>}4BD>~rvn)RdAHpkNG8*ZHct{QngK^)T`UyyKpB7=c z$LFP7t!=W4fZsI|b9lrCh4=1_0$!^yKHR9?jr3P8*ddV(O0;LkqBox1G;KJQiMb6P z@D=@QVez5Fq?qcz^Vmrr9Wq+*ZrMvQGeACTbj;~kCI%$?qrug?eLx_1OO&l_(=;S> zMoSr1}r)mL85C$ifZnOINA&gr~xYwv5b{oI#& zMgZQ0E(#kCkURfs-kME9P@F$&uo0U!1jFNm!>ZaEKCQ1_=^ls@_BhsJx+`MNx%qOA z*aO{EdlKjJD*`=h1=7b<9|6x|rtd+TtLSf$<~2 z0;qqk+~5KV(m&<#>u*v7&aD^|!AG<+;ss1WX4|qP1eCem*uk`D15x6=4qp}!KWdtc z6>3{>3CXP*0P~CZDW^L)TAFT_Ov-@3ODN>nNu^Ha19-4ycx%4aP{v(d{3`uZB4z5V z8O-Gu^B{S6`1WqQH6!^*#cF{~5#?T^ouvL=N>%|I$z)jR?7eByy!w0O%<7o5|9*k` zTk4@g(_E8ioDaFKDb&eDYMlH&H4J0C)@Xh0ArZTK7Rz?Z zc93q?$q#<__4`rnvOe-VcLGtK2r?w=_r|+C43eF04aDBjSrZdBlFG-Fdx@!KI%i!!ROf{$MDX1XIq7&C`NG2Lp-<8F_ z1WeCckx7?`T9YMeu-1FImleLW3&&1BuUw?m~qTS69wzl#LmV{1W zih)nEY;==i4_s-DwRE&kT>c#7J+Nz;0EiK1{h-Ni+`U#-3-O8_^9P-c%B!|BxH_po z$0*rLGRvZ_sXq&8%Ig(XL6}wHut_fW1>B>B9ru?k@-7r%2kkz&@fT4N~9vX+S!s zi(!$}JJ|Bpz6?WPjfSj!Ep^8lIT)uGmwqJ9RtVUcT|&^U;6=N!h0a&9EzDPu%|S0Y zgo=q$uVxz5PlAu!vI}zA-an=%NgR?+Cib_>MQMJ`V!2!M5|WGjPv`ZsG~5(lFRw&n z_^2#IL+lX<&kQZ*#{DGoX6?3P&rSqG016wIU4%jrkchA_|ApXmtQ%U)(-^xR|86p% z=1CDPP>oK*5gh>x1NF=TD~H{-@Jad7F6;)yNhImLuf$GEf6y3x?{B*c2pIoQ7K=l` zz&RZs<))*edK}`Eff$%t92Dz9c>g<2mc3+pTE6{V(R;~i0%4PC7d+T~K`w$$j)GVI zEPM9UeH53*mu5-jgOJ-|VV}+7A=DR99%N85qo%VHtK$z$DQJ!#a}v67OFQ{kq_jfr z65_1d)#?)`f$)R3m=hN@sj-MyA~?MOMg0WPCM`n%K(!pKXrD$z{7p@8xf$NMTm#DG zBQ$|u0y=%`9*iP6rzGr-kkxw~ZC2K{EOzsDA#$puVIW!NgkkUVEqO8lc37^^_NqgC zf;2ih)Ar*Q^ZD>f>hdEK!d-MrlnPWtnZ4)I~LOXnyvf` zu%kH1+{o$jyjwJct;q?kgYY{EF9F|!1%2>~e^Vwiu#y(pyoKcL2Al&Fnov1*_F>HD zmCqGy-x!N2;$B*q8PH~{fsh2k%m&)OtRTrKhYU|7negL2HelkUO1o#kPT}cJ1_K$G zGcoJm`$6L;C9CO*zoS)60AJgrg-s9-Z-EOt!Swn|efmtV1V~AQKyh3#UTDAkBN<^1 z)In$#YUM_4eF^rAeh8}8fm@l`^uxFPGKrS`i88@dlI3OoeZM}S(yTJQ9@JU76h;Q6 zyV`&@^o4wd280b+qp8~aAx9TOz*Ni{?C1jRzaZv`Mx&E9(b0;2n2~5=6#9aoZgA|} z6%zG!0W^~)|F!YYQ@nKTJA$ zpi>!{&-_Ur5C|!J8~G+M&(@GG;A5T7btSCv&$VVkb@%FaR}Tqh{%ZinTRO4bfcGI? 
z2M+zBfmR|u-&V`AQz0e(3|7bU01)4j^~?6KN2>(06oiNO=J~k?DGJraD!f$v_eyL2 zH!9~mo(F5YU~U@TWUk#i_tqW%+5|mkWQGuTFb@dR!*idA52?u~Fm(mVU2bE_Qfbc( z?dME^x0MlKPy9WJmi|80_lfNFvX_8nqxQ>@NbR%erW3!)uee#+U^s3;b2hbba1aRL z3N%-sUZ3c8hL&^f_H0Cipmgyt_-$`X0ZXPlu!1ssocbT5@rz_?Da75I2BgcN-{e^t zr(nO&H{$VbeBgYLF|eqO;UZn#u%9>K7)q%hiSYCX0p0UuLT9e*MJ#~oJs!Kv+Po*u z0P`8XgOz!9`*2?%hG^<{SNkiDUIHTeYsOD$MI0m-Wc_eU{JPg`k$w1Q8RtcQQ~Htw z&ejcWqC_deDHa0%b45%LVDAqC-ffV~TBI~rEqvxdkgP;{zaG8Z-y{_THV_PV)9fKV zmKmwzr=|Z$=lR%HAJf3QVO9q(5-z3(r z@8%~-!a#NM3yxoH;P+U<&yPY`2=?uS9F0AX*8X14GN|-7B;uSfycy<|lEY9nSjI(E z$B&rPS?r{R++9<;K2h;!d8XPS-uPf}KwX6FD!xpk*{^!mVtej{tlfU!IB-VjkDGKH z)^@pDF@Lso)J2y$X1H=YJ~PjzUdtlQVVjfM`mp}oN+EB*j-u$&x;Pq#%c zgWMR%P1o?q!pyKMT>>Sw!%;A@idN$Vz+_Z2(dTd|H=-$p_N0~BvmPd379g^Gn20P$ zNlJy6w%@<}QRy z64jOdslUG3ODF>U?h`W5qN78;{&L56j#t0XH_aYV5QAt=w*k9j$fWgAb3R_@t-{VJ zDAbm)TP$PquI8B$EIvwic&szXSHK!Uc;BU`=qGg-kO?(G2M1sxY*s0W@O5DznDGUt zdP)ngfOI)pS8I9IDn3w7%#1v21>7{*{iy#T7ckd&3zbJl;$7)8d=dv-l=Ba6zo#^|mjoEGE3GBzZec6jmC zQI;2F&%RB~q!$8-5zbHw5QD_+$wWLclvQ_%+p2`4WeoQwB1CMcvAgr#((pb?Yj>AuB|E@Y?b0ZIn#Ne5>Ov7C1TlmxBgPyxsU2<& zh$QKjzQJlyC5=DyS4qyhoqwZjrE^6}0l{cQ2>hyq;Xa6e0@`7X^Il7U7dgRCy|hITst^CKN)4G7&l@@ew!2&>B8cW53G zoeN2P_`&rxM@%ZdSpi8)JhW(8nG8ccIOGrT`07{^25ncldPVu^ZA$!);gwHm^NeU16-Zy}*$~9+ b&s-qa>sJY>0l^Sz16|I*=Nk{l;=lg^gtC!@ diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/multiple_databases.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/multiple_databases.rdb deleted file mode 100644 index 027b82d8820744002c43d2bfda91869fb581cb2b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 74 zcmWG?b@2=~FfcIw$G{+#omv^6nHOJ`T9jXs5ucJ+l9-fOoXP^?|6_tn7pErY=cT}< I*&zJ?0L{P~rT_o{ diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/rdb_version_5_with_checksum.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/rdb_version_5_with_checksum.rdb deleted file mode 100644 index 0a533c31b65bbefe79b4826f357285dbf3fdff7d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 128 zcmWG?b@2=~FfcIv$H2gnn3SBtlA4yD!N8oBpU<3>Si}G#fVhf*4J4VG2BjEya`N-i zQ;UjAiZb)kHA*rvi!+N8k;U{fQ}Xjlvh(uG%QF&7GD~t(6Z480n1QA+1C9AFQPgnE IV5(jv09&~%zW@LL diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/regular_set.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/regular_set.rdb deleted file mode 100644 index bbd844f0b0013af6a1dbde3f23fd6bf1ea67c362..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 59 zcmWG?b@2=~FfcIw$H2s0l$u_elUNjAoLa)hl9XDK$eNOx10)l33NjLz3o1kr+e)1;u+n{lhWYn>fJmf zn%%5spE&d`5kmO?`)_~$@BioD|KorC>)-zK-~Z#k{`J5A?LYtZU(Nqzrea6FvL$_~ zG`kG3`nsB^!^w{4NP zoimd$MVCV9Kj~jJ`FNI^RZ}D(!4Q?>wQe|oZ9CJtzIq?E9ILxSHk|2 z{iUm~Z{fCzTQ+8QWm~;{)WT-!&P~$357z0bX|k4u#3=gmjMQj63#EU&4W9x_^Z;~u*ME7>H8#f z=t6C;Qx-!44f|50eTE#Sv&{IGihX@cuSX)tv^@Gg*6rDkoLl#tqQwERbenl!&8rQ< z9{V&rTex~`l9V6cLIf*xc^-EkqxZ`ojm{~Z$JpwdlhKqVw{0_>)j}CwwlRwjEgnXE zFN~^e8?QW_r*#jXdme@`Mq9!?(W6$~Hf)cY&DgTGZ&neg{|8>FV!Xe%W7~=_G{-_q zT@Y5iyO~)cj2THonB%$%IcDhPueQVOLj>uYgP4J=!?*H z-VZ!X{k;@^812lJF-mQfCEV@SR5I_!*_L@y*QT254v#-_pOPhwaQGJX@kX^~VLihZ zz1r$T>Pzio6PdKvX0&CdZ|?V+TRo9ocRLS#cnr2he%>9;hFt_#=F8D`s`|~Wr`9tK zy`Q(5_2=YeM`Saxc6E|dKPdMP{ud6#DXBh_o2#R)tQBdR7wL#M)z0tgO#Ly7L`E_7 zV!T<2YTVzO`}Gp6*J9N5RX^^z`k8pz_`#N#vys*$`j9BPdkxQC^KPlHP~6|eUy^lH zZ+g!-QKsF8e%@qrD>9C2qba}ZwOw}Jh^4vYMfRw}5^4y78Q?Y2!`U$Ah(9=~FDcJ$!q2laplHry73{ z0||aQN+Imp$cAs>{Bt{*>nHB$5}rOGO__>W(zVfxOp^CJ0`#O;QZmaEG(9ftvBwPJ$pJ$GzX*ww~8J446xBPf_ 
z?6o$_MWb#XCfC5uP#g7Vyo3}?Yjx9w)J6;E>R6ey4mTP6-IOEwORv4OJ-dZm)9_qU zAMHQrU&vkupS!lc7FUKe44zWX=q`H2y5SD9@}dd1r)z)jUAc!fJ_0YV`Nyrs+uCOo zzS&JP1tX?s>Gu-vVH~1Z`QQp;Um`k zZ)f(tKqbF+562#XwXN6~k6aLk_tiJgy|cA@($PgJqsdAZHCvhc;anNMg?)xd%^IS{ zwU+XU3+;DiWWCswUA+aA29$Ix>^xDsTDvWhgf-154EX1>)a(dXRMWB!rt3ycSa)30 znC`L5an|l^Wa6B3ZZAfcu=4)z1l6wjvv&o!puhcs}C-F zKDWydO%Z*@8>!FyN7-24A(8G1t>j9rqg*cZ&ZIB zkFXB1Nvb~?7g=Ww^FuiY<4wV!>MfZKgnO@t6-`tb}=ud&NCm5 zft_q1Z0I{r`f7Kkk8PgEm_?sPHHJ(UwW=A1oT0kWs=xMQFu{hVfA&Q18rPkXr`GeH z`}kbpHfH8_=v#mCdBxpipNAZUN$s9En|rfz7lM(s*E!2=K0CiPt6sRD{L@$Ml+@Mc z$*y~c#rS<-@%?j&%QoXgmAvdDnWlQFGtK7l+t97Z-Syrtokg(`_w6V0ekPr|X6qEH zWiKn$#ssF8>B+*?(zNtYQjfe~!thiLFIc#yoX< z>$EmIS^bm#&XI%U*wtel?1a;7rtRqGAt4Eu`r5@Ur)eScS?k_+d|}nIAUYy(N(2ty z?Rq>8H_s#!gzWS3v!com6>iVjTh{L6YWnldgVS6oiG847D~M3^5ZhhDs^ri)-nf29 zW+P-{CwIuKsIQBKcS zRjQ|z*KEXDp(c5<*E7@i$ct%@wn}Ncag}f-7@N|G@RV%tS+nfNE~?W?5t!7@GVfdJ zOzv5yhcVFjIAPy3SA7L1q@7DBx$1q!ZS@zWxWR|sR3VMl%JI1aVv_T2nVuz*dK;?_I89@S+!)0=S)>zbT)dBY|6)rl2zXnS8P zH3pabG{(1up&A!aZ`*2cFZwz4(eEal9ECztBUIL(Ysyf5Y-6`ojM~GS-d3D5GiM@X zf0blnk8?ArQ;M`*`#ABo&F{{9y%M5KE?GA9?fr(u+m5i|*~eU0^wA@7Sn$Wa`<=yG z_e<&LVt&ez&+ifJSnBRr-hD(8^lMbw2UHHGyX&U*cQrO{jce$zM#%ph!4{_tdF9U! z^17s1n<;hXR$J5URqvf(X5%p`(PMWJba-N;i5HRQd{-RdL45^oHt?}WR3l!D$XkV>>u0F?{;C^Io^_p)?~$m{ zPd|ctUY*tL9J6omuh6Y`${%*y$99fG*y`T)&wb)?yFvGL-IQh!g&KE_Zk!1X94jHe z%e%vET5V(&Oz!V;c6?gSE7R$k8)@8#n4QzIPcAilLHXWII3wcwj4+%6NrbP{;ka-iE@)!j(*5IMC|lzm zw%g}sT1JA<;}tJW$dj8rTlQN|IHD}MaSGjoW-e9{(TJi*-CgkVBvlLp>sIvi=@9iQ zpQpBSoF`jp$+mfGhPd5lFZTEr-Cr0AXUg%3ELU!xF>&l^G!fLuIJL9*br*Fb`9oq>4@2|rF}@hu?>gfR|%e#W}P9`el%Bot#OD| zUj$i4Cw502J2ug8Lr8R}w(%CZeS2B{y`AmTI9%3tJw$8n=cdRnayuwRso0@E5;qHK zxn&J#sUEMT33!(Jv)*GYU@dsRU18V*03+hJe#y^8cVZAbJlR@*I4k44*?Zl`y&JJE z;(J|Ay-PFVM{K7WT*@}t#Y~YB)pXI-NehYi1h9i&XNNOkc03CL_{WFMx61xFXuPYN zhFi)zE@&j1t5?ssaZM0UW51qCi?gemBnPa^7qF#K=^x?dt>Gv*A@U(8LX%`&FTz;-DXWms0P>Txq+0~?F>GFvFy`yu@u5M~6<4Zcqy^x#q zH?{W^urB)h`lll7Se8fNz|WQ4%-FZAEVT0XFs&i4D@rb+94fIit7uh&HzMBwZJWKI z*A|f%s-)^tE_$O0%yXb)dGZj}hSCYqv8?>DUBCYH+@GPnV)stEXT1Pf%_Qp6R&=t}O@NXmJBi zDmQsvJNcg0&lrXf)~8yhhN6A6Et&~b=iR+qEzteBESbu!WNtg|&P<}(CEfuy=i_{! 
zl1`qmyKxKp&C-|KE}LE(F40VH_i-Gess=HPW54&i+B%xd@^|_dXIuSr=h*T(-9tZh zbPTOs(sW$o+26d9SM}U&>y_fsu9cJdz$?)22J61tQJb>sGLJKsWPCjH)+jd&b1=C^ z33gvrSn?EQ=ZB{JbI8BqXESVN7#tnRJ)mN!`=;9L420QUhZc(!bxY)J88NqQmu0)f zyA6om{e9&nNbY?{+M?fXoto05%JzGMTA}7UBQ%o5n{}V%u_Jzd-gbgAEG^Tq+t#Y8 z_=7V9>q~SvqWO__*P-c>-9j_yjKcnu-CFPOQhcB5ZtH1kTQZWI1ZqGx>?`$~&Np72 zcK({9^?NRH)$}+<+gErP@oPILN;cZa`#R)|Af~LQ^{IBhXeE^r)6|$PF36CxY>AmV zw!T;O^H3;8o7>}Gm8V8Tm@C(#lx!E_w!J^qImO`Qb+@4{E!{|>)qUby#AomUno#nP zTsz=|;D_FHm|552V5W1Qy45c1ak?!92--T&QK@6X(bB9JsE00|30-YOQ!Df zrV;BEWtaK1F8~8b%R1IsKfO2n^M?bC&DsyDEpa|3Lo^pT7%a6DXMqPFtx8YE& zv1SU#nY%YTiO-c_-Bpib+|*P$5tW1>6yT>kTiv2%tZ-Q~*14Yg*m-_G+tw_l<@!2PUGIAp zh9pq|oen3BqOV3wG|2Emv#mea;Adp!QFDK^Z5(`@#{C`-+bPo65sd*Vt)&|SAULsV zT+gpW2K>RLp&hd?&(R@7t7jj#O3d?uNPadamIPqPxv*_)f}c69wzs~}OgrxoS?PWu z0<)<*h3xz!fl+C3-P>6<*3@i3T^m<=$!B_VI{l3FH9OC-6Ug2>F16YiyYr1x-zzoM zG#<@UiBzLv(PL%iRmwrWt-vqx;V^!>WMcD9#5j%i9v==yWi9|B`nG4lHqkmySB&Al zmWqt!4OAa-3PtB5bhbDq;S8LV^Bg7vED3cp0YFeJ0zlddjRrlM>uZtE2`eFiLj@q& zJ3G42R(hGKHD<}$>O=AI9-57wMkp7u8V-FSN>10`D1d@v>cobax>|0-jVrK^%T=FV zq7c!#2k51nV_N57LA1Kfi`V=D(&oaRw94&S8lpqC<|2)r`*rqflS5~Ul^ZH#4{{`i zet8CtN5+wBQ3HuwPzE`R%HWtr_ROf`fZ08JIkQv6*ZncI3SiE~nmi(ckk9LvDyH9xTCYjLYHZ$1$CW6u%+F0-RpC|(X9SN4FsZD-LHC_%2YFOl?nbR%L5f!1BiC;{>pQ+UlMUTlb6HfC}y_p$~G3O>Zal zi*!)~-9SeSqsl8VOF`$jL#sDX(`Li|Y zsc6hVt*&S4GC(t4YJ-A?w7JX@#@v@B66+3eb>_IbrK3epp}FjH$IanejA}po8Ew?O#Aq6n73Z@9(7&5W>&Z4l zSL>xwSxw^2GGAz>1Pjvjf`E&=&T-#Jt&iZ_Cd{i!r)vN&)__YGQOGy79x3)1bhhJd&RhJ7?5ZnzOxjXys~GFx-Cqc45a+f?;8{-6yvisfT)%{2CR zv($WQ7&|FA>y9=*jn3CjFdwn&%{0uU%>yG=Jl*zG##7z*6oH-GlJNWLlv+@n=V^PF z@av8vLNi*j9e2B)UerU%yxXa45pg4c4!#p|votTXZIkg}6D5f_+Cl-g0zuU%#|oNh zz}e@04kQB+#8wo(vd53eG;Dvzlvr+ID_nQN+6Q>5hIEGHK+w zm+h!N!z5#dR-sRu2U>0W>4*U1#DkO{1`miVOQ-sV77hS_lKD!PYu+&V31%<`HULnc z7ZRamTlRKgXL~&R<52-w@G(w_9c|!L4=d9BbgwO*MSW-Ol{xP2<)jGHKxyiTsD^X^ zda3t2#U1ZK&p810ij;C6J%8Vbf8HxFt52h70Y#SEo}lmt(669~*oqzM_5s7hJ>5EY zc5(zNzW_4A0sYm_vvuXBbL&oXXW2}8&DRy(nHK|~njl(xX4>@g)GeT->~_EF@=F~; z=_rgkXLMqDI1dnNdk>&)k=^OQa~(kby1fbgH4W_*PC?)F@r$u|gsDyGk<^IZixhThYD|5*~ zd=34A7iwl9@hR7|dEM0xZTEpx&iPs%f{cE-jNU#x4XWF^s!T=V^dh96dyT6*pPu7T zFmVF}WX%gdmvmlNKQ-s%d^ISAI_MtexbnL_(URa{zHT5?5fqd$UjnFEecfnSTgRVk zxP5m6skPl$lifW>kK{oX1F)>UUJ9DA<~g8vB1e*RsYWimn-DXMJi6ZNx(T#ByL^#t zy{2>n9GG4-jDPiSY_rCJqbx@i_iWX=7NAv&K4{o{V*$gc3#@82GOWejHj#IDiTdkX zY9>29=XB4jqX9CGF*zjhEuA8x)8I{C-GUp1-0W-Q~Hc~t)6(inf4R*K}Z!=K@o*|8>%NX5PY^#UbYfuIcI{l z9WetpH{;+D&}OZAWl_JbPd7IcEl25@*@R9W zZ$v&T9*u^RfRmDRY)n(o=1+Yc2Ea!{zGl=rNlvY)7lXsd(E^mU6R$`@vI#h*pQ=3# ze(hY~H?WCvb)5-|9#3y;)->S8+w~k`oPip=Xfgjx(3+VuPY@xQv!Hn>b@Y`V(SaiF zxOm;Ad(hhS_X!}Q0#!YF{WcN5YT~=+$4W}-PNq}}VAJhHPhQ-(^8P}~+eb|S+dkOh zRUgK^p%MCwKR=uWS1l9sIaH7ww5316OW%yTFdR4FDq2Mu_iOcvJUhf|O(ox5`NnVJ zQUG^}j`uqmv^Q(E zi=~9CN45*<=_H_yYMYkP3za+={c)NU*`|*8`#m9_$x}0E8q`gm!n<}Mok{YF1M1O7 zZV0e7stV0;-j9QCV>!AOnB!Xx{P{jFtW`bML5LFc$fZ_5f$ACn*@k4N&a`u&Gk!yV zj_{Twgf&g<@kZ=3xN8Otnw!Q2S&s@HQi~S5>tfRY*>gcRqzo=U13lb%pNYFPzev&- zNl-Ddbk}s=qlnWjlfQRmPTG>|BCVMvjq?f|WA2cK0jJ(XiXRyJ`;F`gLDkhZt3$$% zVmW*#{kYhbp{_lFe9*4abbDs8P8mhrVcw$R^Nngyk2m-(&&!1e+DC9kvHy4>RA0Cf4}` zq!OhigmQP|9Vx2cTbmktqln@Sf0j`%J&-a^ARq$PBhROGQwJ4Te&rkDGxi{_T`e;6 zc0Ubp{XDCqMCK^byB^eqU9DGnID@$aS^jX*5`!yntx)+qq22sjw21m%1)1 zh-Xe>9p9k!mDd=bm(8-sQ`@7CwG0*Z05Y&&jEr(h(DLafR!?o*4P^)jagO>J_XVx` zlokNP0XX-3?If5TLI+zWK(j1S3OEV6;|ijIr9+h%UTl?p{dC+bWi7~CwEsKmtn;QI z7$K3+Brezg1DKcai05SuBD@~V{=vI zC`t;@)5&&HJL3-LHEOjLYcl79rE5P?}Y|x+GmSQ)lIaJpKXuTE% z(f}dqnaovZYcH-Nfe}QPWg&w=6?#ExTE8*#yOWo?4rM^|g0wp=0|kX3+*&aYDI_ZM zlcA-lJ9+gHsA;{f_LZ?#pC?}5Hmx)Rxjv5b%ywdzDk2%Ed6Hk*A~8me7$F$ufqja@YDiI2n*d1 
z4$lVBv`ahwb-m7ao5O(nJ2Yl05-Ro0EP6QVc01g`8c@R>Q$fHzT+yw4p#rAH0}X&9Li6WEeL;-lTiW2tTKe1a0hk?uzf zD?ZN>WWhs|ojpP}y=$Gf3s2|EtL3rX^8xW`A4&wulegvKQ|s*8F#aNhOn9txz0+I6SMh}8uOTg*)BiWr$$2yd+O|maq_A6UG<|XAN+I6JYf&?;1=>9sCY`26; z+lbQ8t^q2-tYOH9=`~1vw`e#csLtcooJx4rWGj4*=3K79q;{iF^7kC_$O1VF>K8wQ zfy1}xPXrY(#bGosM{o&E++9D9MlREaG<4U)87TR@A_s8}nHjKlh-jKAd#IGf zRB(n5hw$$4w(WCWAJ4iE14@1%F2>IT7^zc27)tOCpNAZE$+))mMw!~k2(<4!C3`3YF35uy zj=I8S>48W?<-d+7Dh!HpJj<`R3;Hz@2`r)k{qKf!-}ioO1QHGnpGQ|@TN87(hJw9= ztQ?F6?uz&{EE8uE06DOc z!MvfiIv>peqSgQ+EgM2v@qhEDuARrJU~ol1UO6CmuvT1(X9>d!$!) z0x?QYYJT8E#H|wB77O!BBhZ=da0sEz42%p+li%P`P+MkqU-1@qZ$||pAcK2I1#z;A zH#UiV?)A%=Nwl>hTlfaxc6q1zb90n-dz0}AY| zi5p<-&?q__gokFjOg&Unbq-k1uMk$T&|rz76Sd&RrQXj8q#s)4}Po-fal71@? z0#3~edjoMGNcgU&`a<(PU(KTW2U?Bp-P>Qjk@guwVFBv%RU8zR9ADu z6S|D5^qHyoc#L4}d+0yecF0H|Dy+OG_f$4IIJJ0|$KwV&2_bjA)lgZN;X*=`2{LpB zk=<(*`^tGZ-pCmaisHto;ht}KZgb8X1l;EAFVuuvwbWKXpL%3fBln4e&U?e@ZrO0y@t#gD3ydS$&r+ z{&*m!222k4xpn#+bLAH70HBWsnzt1wri&{KS`}MLLQ;Ie^3RM2?jFtx*fsZt z?%lBCngJ1^2OLBMjIw+j=u#j_2W)1|ivIfQEle?xQ4HytA=9aLC@<{#zQgBQyej@- zdO8?<6Z`^o(@h*WqPz`^7qAVWhL<2AA-@h)?qSjkPWyr0~|VCD`?9b<8{Y!OwSIAb|0XO1MP>@4e$%qVE0y& zU)o#rs0&jy&<-h(l#9Nlf+SjBj06tvLsNh{E~~utki@3XqeP08fYs~I4~WqQ)v;Nh zYPMaR+u^)#0zWRArJh0>carAt#jC}h18Yw*dxB^A1+Jh>fopLHkl$9Yu)KXe#j zEbEtc+4zM!G^G-H0t|3QIe*z@?_#Q#L74Mg-rfYU!NJqjqf`rgJg%VwzESr)f|8s= zNvrSLb3av`Q9EmeMWwIa=vQZi>@q`01Lp=Ll^z|o2Ul}o%0ONBzGuO#um;x9Kcc6A z7Qzqbdmq7PpljcIoSh8ic#Mr8*~31sxY_zM!|P$uldPMW5jW9G?E3W={VJ(igk9~^ zwxQNI3dHTP;$=d(A1~zH=LVBUkh3W)3Ib|A*bwk7zodt?OcK6;cpTt%1L&y(%}D6# z9(vz1VUhp@P_Qd-mY2DiE0~pWtDy+~^Midj@StOE>!(Ft$ml#e>LK_xfHbVf90Bg{ z4pw7`O%^yWoASz~zbJ69qPc-#&BN&2K*~%Pv%ujyO2oYT#i`QNEMUMVI89--p6nT4 z`y8c>Ufmnf` z1MbsFz#$=YBU~1DwXeqv-#`WH9VDuY76O95y+(I%zJ>2+;D0+$dcc`O03yUCT(uj6 z3BK>)2AUNlPmhnd;2LLekuYJXeY4ml{vG*Y{28H@w3ws-&KCV|;WTf^8z>8gjc1M!5S9K#mo z{esm1Jfjg3*#ot*n!-?z<$AVtgI}IOO}V^m`M@L}DhG8n#Ua)ip{ABMfe{_+_XXP_ zXhn!`CV1zxcK}cV<{vy4WBU*eNPi9k5fiZ1@`N4=L=EL2p!xUB|IDN4KERUS5s(t~ z*Q2J{To7`u-55vjc7X zGB)t)KE3J~x^c{WfZTII%i6pobA|tqb_J;)D9G*(p3cymPoH+=-c(HsBr^tUPm+g0 z-!8H)FK$4;)`IO&pE?giAy7kH07!SC1O$=rnt8VUMgxZglu+;-vE!Dagnqx}vlFUV zdbMy>kLF}1aBU2eO$o%|)61~3f(t7iY}CZA5deZOQ@ujEjY1uSg+|XOLE{g14%%FU zhO0>SQourmbO^JTvO6qquzC$5yj|eum3pa1IBB6j?7Uf)yor;EfP62oG*zb1?KeQ- z7vso`JOG&%*qZ^niLLqb!ytf-zPSU1jGiH3!sp05D`M&KoM`kXq_KOcfIdE~ZXwIK z`X96CpHUe9{Hma@A1tAMgS}Z%^_Wx#;zeS|8!wL)D4uEwGoPIICm=)YXMZXe!!uEvOrn( z>t2E99if;nATt)!Z%&}=t&$6ScP?-%ZnBQL8BJASHkSR+hF6?NKNBJ_hHGfh%ZjZ! 
zCa?gIO)kp~HZdMq1_oOpBMM`sJ5`h}r@O-zwr9wPQzXNu?c11M^{Lr`)&(Jb9fP zL)Qr@#BR7)`jbSg*v4R+Pt_DLX#y(8d+BZ-CO6q^< zSkCq87tzKsXDjV;j6s^Z1qb5?`}aZ&+hc=tC;%L`>@&ZA)XmVNSp?M z=NOI9v%{Lso;&7hBdJb{{QiDwR8mg8C^HduP70wtG+ZlNMa$V}Z^{-F4Pm}bghmG&{7dJD1EnH@zepO=3OY1I2yMHU?QPn2 z8RyDya-yknvje);MUKVbU-cG{evTJc({<_wdZp=@Nw=Pu;{ioIx;1*E!O=k4haNTM z(V%d9ALiEv55r}Ku-i=xriC#9xVB2=2BSK_edd#zhTVXefGpB$^znw`Vc2JP1hwF@ z`KKw1JLw8QJVNt?W&*hyFNl+*zMytleaCy-lQywA1ZY=B$DbGHriU88m<;f9PigsPC!MMIW@ zt)txuFVw?N03+NB7yOd>?E40N<{(id~ z8A5Fgol$W@m!1iVaA-tf=5;*~3WAv-Rlh<+3_Q8lIH4Gpb3;c>GUw)H`X6tF8B#GA zCUQ45D(2#Tz__e1st%&@ zkP)CKeb)HyCV-QHOAJTrF!IS547!@@4Ks>bbe3=&Ru-;!9Wjz(({9fTGuP4l)L5T~ z3Csd~abwj+@wk0+DsF4OWdw3to#IK)D)^D@6`-KE{p@YY_0KK)vp{&sl&QU~-)_w1 zso^34&W%B50Hz8(3(Pc7@=4#h86G!?OYVeA@*eE30-`{miy0#35M7`bjtixL3Gs!Ec`n$u(X&h!TztX7%^VD+Uw_5sqR!Qx$=&iP232(Gvs88qh7` zQP;&UZt(w;qlP%s1%Ik3nUE?Zc!&xyR7TsZ;5GsgkEK$ z?(obIPt(Ef%r*gi^jRbvzXxV)Jp2ZQc^|;t&2YWj`x$?b zCK&|M?LC?yOCLcsJBs=nYJOyrQ&wtH5T4Gc7a>GvuDej=;+=jahAmy&y(y?>I#isd zZqJds=T_Iw9?j6BY;N3|0IUKi+IBec`l`O(37VNBK*8Rem=6Kdk*3f|g>Dnv&ji&a`Q6N`qo?J`%&34N#xlP%FTg*elMKfKSCqtD46P_Vp}~Bw@)yp5F*OqgmE35drOXue zZw|f^HlY(kAocvD$-&qJr;=#ESnN>t^snde>k48U`rGBPE?}$G($^~VRAVj>sj|c4 zFK~+X7%=8d(ii|ZEw+1SSbgpeKPXi59@HX@S{H zjeiI53>-ZeNDoM~A)GzvBQkV26NV~n%wv8E)yth(?#^zsruHuiMa4GUKMI9URh7ak z=uSW@Cd|=JV7tyR8wglK&}d+H*jCY$jc^V%;?}ZzF!2M)1!(XJXJu8Ob1WVnX1Lhk zPh23pe$ar!ad-+K0Yh73P}GPi2nS0qHuG!vLEU+w&I>I+PvDEGVEcVS83~?YfiE54 z(|LysL&ZrBJv?ELh#Qi#-E@Q;U_i@$KrZ?8P-fhb&IY5@xIJJN76nseG88Zz>ZM%7 z+aX8<8h+#l%Z-~2Ld2a6HSG_RCkzDG`hK5B4Zq#9kgvI+V+Csn@(W_ z4Jhx>Ajcd(#NX$SN1b$ak?K^p>7ZcUxG+uqYHu2Ofd|+TFE%j_WoDSmNBB_8JKptM zZi*KD89{`g-|I`%mDk%UWQ88h9qB5&!jv$ktGPuYD3}U`dgQ&-pSK!nQH)?b{JtW~ z_i8)|hL+iYAtx#vldA=cwpN8{fNZ8+Y;XC-@h!imdLSUe>7CAnw;1TU?wD+Mz3424 z(ZKfBpbz)+sh+CAib6m^|`rVgHUoLjF&|7EX2Iq1~&Q*9`HP10OfHxag5Mf znHk@3UE>2n@g*oeuT{;&IeQ35ZEysvBakQ+`*yd{kGr;+O{ZFM*{_VKFaB6fsm{ZkUi-N*VFVCXk z8PtHajtgvXvlv#IM;c_$ydeO!4HEK8Vu{a+4$v@J;R=TFIpB++TH)O~Eudzw*&`^q zxY!lo#E4@;r37Kw1s&?yF0CPK#hhUdImiSd zV7mG1HLoNKD7#^{oMs(j>cfybcZ^*U(Na)ubZ_$iBSf#V_n_+mym= zSb-T+FnhqtEg)GtAedRTf+pl*rC{nCq^>AjUnk4?&K&sa+Flwd3z0I|G_-7JkhB&( z{t}qo`)rflF{-pXn`H!$j$xS?5i)`8#w$fzD}vF-{rckH7{td2>>I^PD0`|3;!gpr z3D|8qTTGYeS^s98uc=~?v5k7i{6OfB#1lGHk{L2A7&wvpsNwIp2e-i`1_=^|gh`8W zmRLc7_WkKed>LonMn#tyBGkHUn6EcXe8r##3osP4@T)JD@}g~S-ck>lfu08@3HCc^ zn^#jtpjFV^s^I*_>DGv>#h8108)%c)Bk z2t*+DT3d6b_Xm;{` z%@=3}J`W;dkAVTee`^DWzuIAQFqmp0--LD;&|^&H-jo$*YXvklytM8wdkg*r=&qUWg>!erS|150i_)J&VPa3=D^-{$>Gs*UEI_L%=bpw!_jmMdIn=d ze98;v?bUgz(teHb!eK(rO#}vQW%vsh0>?V$4Q0dN@Or7tuPMzHrUTBcBYW3WV2xLx z1iMlg7Laa?7Bqxs#4rn+*b0=V1FPGX*NzDITZ_HfvY7{0SZb^y*7?ARVK%pJp>Sj`nS zMSW#pHer4ZIvxP`8G<4VZy1r}9AsG{0#Sz#k05MwAdfA<9)KTx6@26~a{^JJ@$v)d zFe;i7C^o<=+@hyo|BPG>I=!wO-r6ic%v4v})LZxcayi4nj$V{j81PH3T|x2G>^p1# z=^A~t;J>)ym=hpc7$p>dL^I%7ehueSm<{hp1TZQ3OJ|79r?A8Jgr9!Vk~BT>RnIb{ z4$?0^>hl$ff2#?Z1%^n142gJP$>)bXF>0&rB2A5s{cD5EM=po+m8lQ3ZozmECh^Aa&*j*a-gcJ{44b#)$ z(?GyV_R`1kw5(6OfvZiylFhLZztXzEXw8J_Jn>R#0${Ch9cQ8-XxT4VY`{-r4iH#d zm_OVsF>g@+jW8QjCs1foNWoXMEXHDr5`x6DsH-L;=2~ZnSi1tp>b#sNhMu=B`GNwN zZ%$~j=4Zzts3Bm-82A;UA{114R!_X_XxIs?P#FBug1UKtcD|hMpTXu`1_Fe3896eY z9w8P+EwxoZ+hG}O<_na~f+6)7OTb{K2n7bR7qR^%HhbG`%;c%yQ((%}1dvAagM4AK z!wNa(g^Va+Ga8OfizEAXfBVWBvJ+*a9wp|!R2?Z8??P-5QJx4G_j;j80fh!m%bTvo z9CJucunC{M7f^h-{kjMyCR!+3@Vg!wkYq$geryPUX*dd;k(7@$RH_}6lF#uLMQ})b zeBi;G`oMVvO4?}0LZY6pD0zPH&A?*iv&`fe6Jrv$ z9djnS8bp|NsAn)O=R6c@;NwLCG_o+f-I$hqEqKd}-@|ZL6UJra*p5j#j5!d{>7jmt z#&}pTojM(ypj&P+2n|c9tJ37kU`NJnF3kn2IEsJULSpx(zc-+LoWd6_hd1Oll`DG4 
zqJr?lf{q2Z2o%dg#WbVWL{S$!ouQBIJnh39iX>2c%xWdogxSrp=W8<}@s7bK2gx>S zItef2lWWmOjCWV_>)UvL`X_0E0|bLl6%cfvR-FjYgWH$k{fm3RpoOj7$PO%I!RSVS zm|Xcl@$PT!abPeMEDoW03XG0bgI}@`HJ>rtaWXz*kP`H)fU%FIU{nFLg+Gnucfub+ z;B~GE$I$nC#_+KWaer))R5BlKOcjLJ9@Ba7`(9v#KA?4f5!0{vGK|f&u9$>73~2IbNF~hCL0w4kFuw?G3TxU)yF(io@ZJfJyPa>wFy|LT$)WEKP?()R z)AN90JP;oJ7*RiERDun#VH^**fgMpD^RMi<0>&V(`@v%b0K*ikV~#zAfEqq0%r!EI zO^~|$`C%|s=LeI#p=#Fz!31Lr!gBep0Q@^K=+@n_0m6<6LnF#P4^z^6M1GBv9P`qi zN3Y?j!LKsmbB*7?(*eJ%n)|Ly6P^lFvU|7~F#%~;8;~_T%dZh#rHUwfF)ulZ8`|NK zBKv~7M8xGNt}vbhAP8Ik>!2?ie ztOf^}f+zbsdJl%h>SphVu3t58iI^}-r}4ppEf}i~*8t{=mrVk`2j&e34Qf(M1q2}S zc)&~K9kKL%U|96oMWp=FkG!4@EYJYJpytB>Qw&CdAjy}2vw&Z;f&74k$ob!22E*oF znLrhM0Ky^l?ajIW|FX^_J6ar!qNh)9h#ZFGC_o7W``<_RRgGMfXWfI}b{V8Eymuc9 zcy^LPp)a2%96xIa!AR|W+j5?-FQLi0DBPUt8)19HTs$*>Rb zZec9OUtfr&%kIo_K7*kHzmSA@QX0z+B4H)%Gz(~iz>2E@=9D?`#rqBvbwv--4nM1Y zO1Wy#{5~B=ouJj5q8%`;GC`uYT0C-mL-AeDCEp!!_usD!C}p)qIMypn5FGrh@d4f{ z@=$ua3BLK|xErX9(k(sjrb7b7L-A7%FjKo^EVBjO`}TBezv-$^gZc;o*>`1P0{l0s z=5)DWs_C=M`B~tZ{QU}P?Z^xu)x!=L`4Zihm zx9|7XmuPj0Abg?^#s7HhvItn(pva;V!qQa3!CI1-s!g^nq)Hw<$cX;U{rkC49W*l- z7P~jmfHrZ55(o~4#p1YW%j)Q)r6Qb$PYnI5%!%3|41vC(j0l}@jtmVjqu^w&3-s{P zln{40hAVP^DpzIA!Zjm<>uI3(8$Jpo_kX`=KlCBjx!TqnU>uY2YzVNY^=>CT1RxR@ zOpKiq_$}Z%xM%EI{Net-A~v|^H53+(O-xKvcvWTto*zu@xW*CPc^8Tsks32ww1^Wj~E%yv2q# zAg>5E{(L+uu_4UFrQ&8X&V1Yd|zZy)Ft7pq>EZG%^`Qi zRG@1z`8ERHI24DtT&l#nq2w`s9%8Dz3*J{n7&OW$ZI;zkQw`;+ama}z*=Cw?U3#6& zUz6U%F|GZ`N=*3S#?S9SC&PYn*TjcWPQWLlEram7py$HS9HKb{Kje)hs$iSiOvOWO zk~;+vX~ zA$2(OkT*kDk0&OvxT^5#V0$fr37A`$3T0E<`nB?_a|Wo4HJc?ckO3Vh5@R{ayqJw) zZWZcm4wcO+;Jzz!yz9i(*?CCQPv2ZZ)W#_$om*N0M5Myy2TN#OdS>#euZ=ap^PVgx zn@<=+m?7{^e|HM_LCXv!_o0&lIeIMm3I?4sS=2J1+jCTcv$#reAR*;tbNpLg`t+lJ z1SP%`>jbe^C0*0)eQ3*tGd@|?%4t_s!2(nfUsQo;;W5Ja`}gP5Ro9+Z0DwU0MyvzF+k76rgV=!fP;yr-*@6eY^kJf1H zYnDhYo8$-UR+U4lIog5*NHcS~+mged2%o1%9_^p4zpr@74@24ul+tH9hcSZFfmz>d zxdUaG1_*1w(@dSb8=w&gYU_(+Th$JRT2s-t?F?0 z(X^~^QO{G15q#+6*Nuuv2gj&~Fkc|xf%J;4FFGE5mf6EHbl*^0 z!ZE%Q@|XOD|GlWAg{c!n?f^F9HYLi@GMU0B(ITA76A4S#;W#182$VRE#k@78;CqgP_3jmGyut(iO zoVfzLemnSlsP{Sh(P+!qac>nouzN@ZjFz~W0EmtDqr7^(^AYyWvKvO#Jc6bFLXXWF zu>j47KcFAB3JKd|4!}0P5}t2f41T4bb`Ikr2!uqy=X-D^I=aH%-36dUzlUSJ2N8&Y zLXVwx3@)-!&FAp_J~<+`Y?h)iF*&aWvQYjmh$f+dgXuuQU0w~JEv@~I{PkGwdk+2q zZ8K~)(!otKVw}i$5Zu2lp5?Q(;XYIJs+lwFBwVr0bKceR;urX_BW{d3VVf{OWBLg9 zFd>>RzA2UCOr??;T;U-M8iMr+9UbO|?*V@blQ72A4$U&X#9`BnG11M4$Y)-V=fPj2 z9ogxk+gMkaM<~46*xdP8x+r@{SU<+u}uawPDgooE5NlL|1j`P^xlwuf7B5BkphC3`O;C>&!$ zvRpi5s-aG%8}U(0gx(1EXkPmu5Ak-+Bsf-)!1n)XHeLfa4$ur{6&7O;G3pM`GmKZ! z_#}On%|wV1FG|b|(`cYsc%{%n)%-9or6r@sB}lSj;D8y}4ZG#QwaWmp#loP6im^;f z^dQogZGf=av@2#I)-K=UFhUXw5zPldUI%c3nm>xVj!fEl54BG*75Y(lrmRgE&qO1+ zlU^wETQcQa%ofh;n~0i2^x$VH(9}gh1P+m#WHy=|Ffv#h^wnVDZ+{^{Ves)hEap*W z5Czz>so-=#Z3-Zl^@&$NTv%U&6x=^_GM}Z%@9@(NX9@>8TJ*jKkhEbDa&R-C3j#B2 z21IHZ?9>6POJ1n_HqK>UA^GYd*i;+bWE7@HLgXTe7tsyC8I90F1S-b}TaFaZ?U416 z1hLEZ<->t}=jeG)NKqxvT`?p;nj(mC%V-)I6XQNZIp!E`q28y8hd6Z;195(Ymah&! 
zr#aSWA=s)t_;nsItYf@r(t1iw^iBY(XB6D9T}1)|1AKw|gpcNRKjH<$Cpv5F2ykeV z9Xm#=#%DD&LQU5{B;7`+^IFsxY*aF2-zueO;-u8)k_Vwo0@UAy-L8HmXt&f7q|cIH zWWd7M0%cnzTy-NBDfNMHQ5q6aiNsAHhEE$-Z(*tzsAVgN7Xo)K#5Khx(AnoKi=Gxl zUhZ%8?@pFNUfNvh6XY4L7rL*yEKth6#;+&25vjPhw&vh$&777--1-chm zKy$g)xb4Weq@`21=ucoa415qq&Q}Sw)jR@o3rb6mc;PwRB;q@&;(Kk0G)@AG)s+;- z2TmUXZ#EL<=uLb*Fhqdb0DB1%%?ROJ zqUbsC-^^$H5&yh|GqES0vaAzj%|FH?z2;HTuwe!xyv8gAT-pgzD;s$^9G0CcBR>}X z$-`@M$_}CuI;1Euk|=Ol*F6;cMNG`r&nA2Z7Zo^1@QTF<<0dnG3RM0dfFUe1AY|!6 z2XT0u5iL9`i#0x-E!wR-eAMwcd5CNe@J`{*dg+^6|aGZXSiUwBGm9GN# zI%m}~VEjPdzh&?^fjI2^0W<0;6XNUOgAG(#v_RT(1aD}?aV%kW%FfO8Jt}pAmP{(~ zJWQQ`UO@$0su+?73obFt7`G}X(z9$1o6@S1kP4%jb}!-3Bu25T2|rN+?>*t211CXG zjUW`}3SIr^l4z}gBu#$e25?tkvQ-5$9Xw<^(>~vAy^BJI>MHJ*6Pj=`C;2khh!0Bg zZ^e|LJAIy=Q}l>Xg98L-w7Y!tQ{Rnz7&$o~((?J=yVyxReo)Q^Ec%~VD%9u1R-nkl zafL|?UKx~Tzv2J6B+v_2ljjsUv&*_uOKfe5tS$>rPJ-DoSUhkyrPp+=3+_1JjUl*DBV%5VA1C8#nC#`pI-WPZ zQ;MM-YbFTFMsm*6$dD}7h(~3CEidiEWW5R_`hK*MsHw&bsXLqTayK1}%J4)r#fU`r zjbv(x%M`FB%}hI+y1SMTe_9_&(pj zZbS^z8bAbde6avRTbzwBDhfDwIt5o769uwjW18!MP^WfCJ$B#1E_Yav{}zoss2OOf3}P*hPbo z!zyE9JsJKJ93${@-YLELqZYdeo4v~8R%3uIQv#+E<6on#k#OL8=0`e}D^9xsbQaF?T(ZgeeKn7Rg(%P7(2H&A;*k9B#Eu zxx3pq1%(3@*FN_~d&(W9VgIN19kxIa+0fq@9F<_^;@_3mV-8^=8Xl}ZL^;hOh!G=^ zm| Vtu1!iR!YVJ!&CTr_4Snh_di`wQf~kN diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/sorted_set_as_ziplist.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/sorted_set_as_ziplist.rdb deleted file mode 100644 index 37cc2199151e46d077a9020acdb18116afbe0d2a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 178 zcmV~$Jqp4w6ae5zRS+D+pSht2kR-Qb^ zyTeH-1wS$0wAXtV>#~hq8QSu3zFp33_bFaQKkkJPPno1sA*5r=!IE}?oUKD_g7pYa zqXV02uXOeviLtDhfN>+MlZww diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/uncompressible_string_keys.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/uncompressible_string_keys.rdb deleted file mode 100644 index 276f334eabb4799a7083c569d96b6b0e61db56d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32604 zcmYh@ceHIuUFiF{(hg^z)Q65Vu-49b@2Xi_wVU3&hB34teW%ptIxL! z?uI1*0R5+?{noS7XaDEi_QS3yoPoqnYukAq79g{8v-E`Ii0f?5kPw#J7+LMqaTRqP z7lVV!5_@)&6+M}ARCJ+q;=uDN4rJ1n*hZ!yiS5ct4B6bP1ci3ON#{9<&El#^hP*dz zT%@$CJ7?b4vef%7ZKzvTP8fCpj&$r-ISFZln_(WNm=}}+BTicy2bjkY0n^a!9;8w? 
zx;Y$h=Oit!Bd5e^nqps%O;p&8hdeZpl&$E;_i8s*BMK7Xp5Yt3`V^}E#}#A67(I7hjJcP2?w>H!t)(cWIAvK zb6rxKbqQ0Gv3XR+U1BzMZ+jVNGmAEm;j*Jm-W7(4TJ<{w9V(Dthjz+C>^Qy|QW@k< zE^Okt=G=|+5R=@-wT7&5Zjv@2xKD!Mri@22@}O>OtBjHmkx3Ot+0d$p+pO z{+!1#$AA~L4rG*MFzYDu8pjXI)Cj4dbH8YZ(5a)Q^wTaXt57$y6zUGnZNLm=ZIk3-i>Qd>(6u4zWy$lrn(J+1iwGI8 z47gshLQ1zZtt4(8$c34UIpto2VI4+h09hD-%x}|J22NJcID=I^DV&%to-mKGL9HtA z8aF6tfuUzfmz0(zxM;gJ$`-{3NbNL({B{K0nk`WNSRLuk)P-qoVC1Kb5Z zvL!`1@Mu>`OFSqZEIZG9o!!q%&jUfz&kTxCl%!L>Xdwtc%79u#by=sG_uEy@A6y_KYgdU@#EXPlBt88TB<2bGc zJCA7}1vUb6TNb8S4z^*njT!ZR5|TazXoxHfD6hJC6gCxdqQn7QZ^I(Z28VYLn?(yl zlz?O+3@kQYI+s>Y23&Y?S^8E~!UExzmmUPH3@Rfl=dfml2x{u#h?*T8O2P8HpoTC> z8h}pTuh8{~SV0jsPFaA#>8&8%{L`^#vBlrc38`GCHghv-ppo?@bgKgTG!l5=rU(05 zMVZAiE~~y2n0nBu20sj|4*MH-u10R_#^?*_3-}#;!95l3k8^pdyOB4F6j@S1V%kvoFKz2Haxn9=|@d+1w z5a?2G9a?)`F!hESi^Y%59heufJ2W~Q@~VypGK6)L7`0JboyF>4E^R}=grY3<{Wj8D z7#l%Z!z``6)anB?PgAEIyq?C4<~p=+06p7wy4*6LWh7yA8_Rf)`p(GpAg|m&r+!lh zepq*8>u4_0D3m$rux=3Omg4rVdW8X7KMGXu2=E4~Tlv@@-)>N=cO zjVQ~|N(yW;Q`Eidwu$4F+M$(gbzQx4Fu}A67jK-C+5v5a+ZM1M>jO1qX!w0Z6Qf~vMf#O0 zXks4rVY&JBc^IaN9b?gE+!2FSCnRpGG_oNUaccSP6jWxK?A(8DS-CG0w-L$5eZwCFi1&76 zJiCmg+DtUui*P=mYG}Z3%@yzVTq{i6+u;P{`tJClHTx( z$W1cgxDEEWC7JH~VQhpv4!sMG@U6--)4FuBRGQw#2j)T4Sc#vLV90XNC5Figw@FOY zXT-q0&erq_;>5spUg-7WAJ*K#ty^mdE7A0uUz<0+Rg?knp=jF7tKBvYa2`fUmD;V$ z`}pRHVn%D^#dZ#11w~u7JW2GOm)|dGf}|mJ6%rYeQa6EJH=FU?6E(sRcd{=G01B1? z)}=#fIt2TcZ;Ojqa;xt!u|ok+8M@8(8>W#(BY*!@t4_@OK8A&fcbjv$7xe^)A?*Vb zk=;k09}471Hx8|;Nh(_Jof(&QvHnA6!U&)sbMo4&Ha>BoZE;l(U0@Y%;X9VS`G4Jj zOF%2(Nw46*S~z8W)uuEM;1*}Ldkd@j%9Cmm}L%(cIz43`1)iWfEE<<}aT(q%e#d?((J|x^M zvSITf%Vp+^$jpoFUsWG+h})i*4QgV$?WKXXe_l&BAAW0JJB?)v;S1NVga}e#G?bfI zY);miqO$wI0=B`+t=SBBIBLk@$LF1!62fvbr&$Q&26V(~?561|!FY@E_;6{?-ME!4 z$lb!RS|HHCgEq?Th&h2IR!tvd#2)LQi-DP%K8T%oz=XJ2OI;F|5j(u+M%~1a7n#UW zNbQPS2DJ^a@w7{6?sZ+x;}lkr7j%A{GzX75=U2>Rb_-G5=9aY;OpOfGeE*N9MjP^B zldnd--rANCCi%hHl@#kSgw$(yes63z-uCm?5>yR6H4RQ^vH7TFI~+1<*w)_DZZup| zU{PY!`+;F%?sYimHveI*H!EOAvbI@e?Y`kw%jmrX#CDX`U7vX=^@ALgf^-{CoD1I+ z(#kx}u%*iygjUqq!ZlN?$a>e3KZ{Zt4KmIt4@e@1Gl%c?b)BS}U}WW-rgi#Tw)jmKUrWSkjQ-c#Rl zYw479^Nq{E!NkF}b^N>MU2J+S#2)slbjXdvJD0Pv^I|_Xk6(X2btM3$?-z)hZr|co!VUzAskt8P~&naK?4UpFw3H}+WcG^dNk=AyEIuP%-wHk;pS7O zA}}g)_~BKDg1U@rF$C*Rytyw^y$J_vaDu^ga@G%hE3#z!{g(u&pwmiA?`yNStv;>s z=7(4P$oAW$G7NO`0ij`h#4cDSBHN$#aU_@kNe_Hxn=q_ z1x3k>Y%ujc=7tOG#r{b0?K|caMVSOFh|_+k2NT)2D*U|bJkyIE35(mkhGnklG{gd( z4l^UNDHNXNRUT_j-ZODT$@Ye&ZZO(7m(~{-pI>FE$pa8IhQChEV4MdA51JHrn=iga zP#MXLM|vbne9&LOM48nGHx4)D2e*D=d#gwo6u zh}R)n7MVq2ufzv$U$z+<(!ec_quY2}7B=c|Z{?O~nxIK&Zxi0u_Ws?YtUcN}-v+#m zMC*qjlU9Vc-o5d$v!1?ZM~Ph(Ss>t4JX0G`%(^NxXaCnZBV7%M+Gm`u=<=zzk47 zKVH93F&mZCEqEoZ20-hOucW3+E7(AN=T|PsmL@qZTOUIQ_Dyv1+asTE-S_}PE4M1E zJYqqfdHPUic?x>ob*|?dn>U|@M&y^+#ii|2xiJ~RVE1X~lDtcXByacr{o$6-TdIjc645tZS&)yl2$Q4R@N=%)^WA zY2+6jI-X5X3i8Fl;+;2VJQ2|JSmLU8Qe20`411e9JI^?Dmo17-HyE$h?-_?I-2db_=IPG(e2N8cT_;0jTbeQ5`YVm>ooeP$Htw_P;1`!N ztk-(p?>u#sf~}huFFpOj8B5k(8P%bOoRe=Y7gmxqNvZE&zCF#O49J0%hG!=+%4#D` zIxDT4qP6#*tmPYjIJaW%Hg4e!jZvjRMf5Ur2=gM#lre}qGP33Nd)Dj@^;JV07P#s9 z2QN~dOUZE7y2Bt*Q*=_FGk*vw9}73(wV5?>Yf0eB5;-{xEGMuR58rJMzkMe4i!v3x zZ^{6^XEl>(2<@P> zHUwK2R)}f%I``C^9#$hkD&$ zis~4qS!5eS6%d+%0$Ob~kkHmOw0*}a%}mw+Ka*_zrX_^buEv(u>&GE5)<37eclS%n zMj$;uvb+ud^na`srcqYZ2`Mby!?&*KC8$u}10AwtOPjS$8K-1{MiJj?W;kZ0%QjZW z9Q61R%N47gwA0zi=mM~_ghDegS(!D3>+jCh+;#STo`v)%G=UX5t{VlhX$1Pty;Fgc zq=2IfTspR%m!1w7w93@keBj}mC!v$^)6bu+Y>8?Xio%Eocl!x9bX9IjgAXjVtYA=6 zE`;BkSfNp$d{8TnKR;^>&+_rUKC*f=8}lOFd-zBV74N-;p|gX8Vn`;66@(>Am!-KK&t5+HM3x5^t#cAK@xw|knSs!i<# zilxZS;vn#j&rDrAY2fxBO&PFb&cC#bq|?URMXJ4Cyd 
z9UYxdBM#iM$lauksXAcqTC()ojs1S_iiVH=RBH~ue(>jdfE8AEKL#s&#ukVuHnJQj^3i-_22lYNJl%DcXMf%z>cp{l&Eloq;p#B|{Soq;w2-`-vJi9=XywA<*+A4q$uyvh$f2 z1%8gJqzU!JZa1HAziGwM-4tVh+r^Kr1ikQW3rMkzMrC{Y-&6JnRPGWL4?p zZkO6>FVr*KZG`853qV~ZpmB$Yk<**CXz}SqctN+U#KT<`o9gsKesuZ^D?{ZavEQ}z z*00T6V05(I`|dbnYOFc>jiovgwh_a{ho^m-5V(70=lrZLvBUY^7bYS#qoenp!@h7r zmzP1-Cf0EHsGB+~&Wob6t*nS^J&J`XSVHg{&K6_{kOy$A^UT~oH*=69U?&dNhS0Zi zA)wdw;^+~hl9_~JJjfSjK z>+Q4;qXNH7llmi;;~qJ6o%Oq4n&mBXlTiQlx$ePS_wtetVc}CM|Yeb2D^9kcT|OgC~Uz{4k9X(AlQ1@twC$E!Q&9 z{!fgYVK-T8a37{&lXh^)}H&BM+>F^+8T%GJv3q-B8s=!bu@Wc%5WbS8Y-a_B`N zpmlxn`iYW)_U~0k-k6MIZy;PMHISwQLIt8`uvaMePx7f`rpV=cOjj+32e`I=#x2|So9jfCui;(_<)5I%$Z}*Q!Ap*x*vMtU=tnx(1f}Xd9 zzNKD;sl&W-sLaycdE2TEveJ%%3YdrQ9(kO?HCC3A4>I*@B6rUmJpT+ycpN1jwUKr3 z%$d=|NuF%IwE3b|=+8y%B_~Hbf&-+r8pLs&IGw)n*>S>L)UN-ZrM~LhP;`e+S{l@{ zH*wj)ZPOYSUA+EpyXb$ONsEH@o)#x#u`t-40Mfm-zW{_klwjGjgJZ>t0-I4X%$E9!XeMjS$ zKdIH`xw9rlC>c6xmm(tcpO#1=lR_H6oh5$Ul*R5{T4(&pO4p_gSt;x3(TtY5acnz;QT9v#i_t%(SQg=f`io^4)2kWKoPBJ|%6M+uNQN(q}C(oRX{^@K??beO5M( zNOat$^0FA|&i=u&VFhJYN9p0SM??pgmTS`1ob~?J?1C3LpmT87LGFc-lsmyR!Csl~ zGp${)vt8Ob0^|-ciCvvsHPmkq9lm9hfy>e2r5g61J+=~b^^0o=H+m{O+wYyh{P`KM zdER7bHbSQU^l{PEpja!g?wdT#0-hH~Z=Rv5^W6OLpC2Xt;)7$_va8?J;Nh+Ffd(`u zy`8pk+7t=1U@M%!&66tZ4I@idWmv<6pwy@?zw%=5M`;UXWp#~Bdf(tz##^5=KRT;h zFK~yweB#ALc>L$HI!Of$ci(Zrkg_Z6%CmT6Hf}4fJyK(b4;@jl`|afr_NFd6ar&wo zxL3uk8L-B+3CWr&r90=BgDA3e)EK)Tx~;E}u#?&cf$vwkX|k9kz_-E>)8~w!FcZx( zcc-W|y8$%kNb07bv*WyTDbur`p2U=3_znEknIQsTc?Z8b4&4miHwA_a0`{2;gY74+ z>xD%_L~2LTHIsd1j0;zP&2mBphZZd$uUKnPk_L^5kY(w6|GdJ|fJBX>mhlfd({_HI z8GUCC$f!nA?7U{(yRECSVcS(;>t30#%0wxGOr3)2x`~d|a`WLcv-0CO-TR&=e2Yj}$ZNj@MO{0*@NL%F zU~u)5r;LJZaX5+6#JuC#v)md24tHA(Z2jcXp8l>Ckacb1bA#XUo%>rY?CToJq$>5O zVSZ^9eV$`4HG6!E)*gK4W*)b7um1cv4ah({X|#j~$UI)xioLt>%Qd;{EH%POTN~XW zkNv4tXWRxa7-1LQvMg=ib;3AVfAHcyjqU#I;kwosf2~zIS;-UOd9qgb9$y`IzD=HO zR_Cg+X{xMcG%`Sl@A&d+%nK*71?b7)(?udi0?{gEvDLSFxb-_)u<^ht@I_;+W7 zGc>L<1TAv5-Y`P^}+>$r&#STwP_#eb|e*tZQMCIdhEmr3DQ zQh(RXt7@#%%jagwb?BuJ-gEps^Sh%+?ElLo3qZIp#(`zCGh@Gusa%Wd#!a&@54sRu z{kx<8n6`sygVWb;otpwPOx)dXYW@CumI7MT(5;VtN;8VjOnivz?a9QJ>g>4ZLfB_P z)MM-royi8mz1Q5R1CaxZH924?cDyi4Fx0Q!H*%`ezd94>EVC_(G$r2p#l7IHl|)rl z`!!H}q$GcN6DUN6A&7u9!rrf9F?-~tXxcaM#t{Nx`;7QFhQr3mL)=nvcXuJ&~dTBB?ugYJ%tB-}H{j<1H2 z+cC9F#aRe|p_xWel;&|DGNZ6*Q|;`I%5v+K59DrEWNy_1s#6*3Ex}oXaZpye&9G~J zY!)U+XSV*Ju~(P}FBp4OZrnJw9f{t%q(n}PdZ_>8a?W*E(j4x5YSzgZMfS;qM|yn6 zbc7o7ahvzJ&utRr7z z=zCM!A0&ZWBhS_Ueq5^S-d~t-mPq5R8aRE2M$uoKA)(Vmoyk&Q!KaR^)H_7*7p6|- zr14!}SZ2PVCsvUkZ!Z^k$mCFUmv2=|M%t7a?yo%g93~!bfHYGdR|dZJe>Ac2qG=1T z({G%vUthK(5Roj%&{)e3|6~=~wQZ$6C;8UDElU~5W^f3(VOGLKDTsrwUis$jX;#Ng z?dz6q=A1j6G({MiR6WPKX!F|>AG_i*pB1*!RQ%-qT51U-?H?jz=!0a9G z${(oxwu5`K!37CW5rR+zcI{+wOb4UF%xtDn+6xQ(m#goZw4pbw-+g6_-!~~s)akFB z#XjJHd*z*4b@&TY;10R7t7*~Yl@X~H?^~%XW_h^of^0Zf#S$A9>rm1cfUu3R9Gu-p ztvPxc)Hxply`wv1HFvti_v02v@bfpjLtr=9Es)i@?SPPSJ84piyuH5TyMq%e*FzuAW$PuvV(ql^NB z1};0kZ`>%s^LMKd>s`Vd!}H-vHLS4BR+iNh*##vZg#RvSv7}o*k!yZjNYvi8@i`opA>q` zPd}jH&F9XHN;;{#pP!+$qIxiRJ~Vc_wXEx?y7D*FIy}7oUaj5y^0MK!u<6ov^UAU^ zX|Qm4ydX<9qS>H^@wY4qQMx`ef8bRs42M9$6$Y==7g4O-o9!4*1!djxwA}cqrASzT zN4l-QIk`Nx?Edim6RQZ5ljAWr#P$Cenda8NPXki;>CWG*_*`0f)rXaLFsiJB!%@@T z@ds+#QlaDTo`<=tE7M?#Dz$Z0t@Dv%f^TUtt;VLsw*MaQ9d1u_$M+7~wcO~4PNAdl zU9j1!V=LZ#@;Rz70~LYfCNoU9foFY5aXwtJMhf@++3qV2e`VvD(^UDSmVWyjG<_%= zj*)PD#cUp`c7TMeYxvaD3i9gBo5y~{&|}7h-Cq6Z<%4BWv%zmt9o>2L&QV$!pBi<= z-V4Wyy&Qjl{uou8#fU}1+<1qU9o@5LL=Fs;SBz<+?_4<>HzZKd$S#XCuD8CW74`pI zs@&L)hxqC(YyE&)FI4tWS(xp6M&3Ym%{utW<*W~DDa``BWJ#^}EszGgzcg|Z|6i7x z&Zz^_IC%OQnF>N|R_a9rp1)>cJK$arR|&$W{#cL6;>OF*KgUQzJKYI;k%j70yEtpR 
(base85 binary patch data omitted)
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_that_compresses_easily.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_that_compresses_easily.rdb
deleted file mode 100644
index 7fcb8e858f7d493ae5813ebcaeed4b32959f5d7b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 103
(base85 binary patch data omitted)
diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_that_doesnt_compress.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_that_doesnt_compress.rdb
deleted file mode 100644
index 060a6ccae738345f421253139aa0d6e57258caff..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 125
(base85 binary patch data omitted)

diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_with_integers.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/ziplist_with_integers.rdb
deleted file mode 100644
index 18d0594fa162ae766823aab5258283e156f263b3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 130
(base85 binary patch data omitted)

diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_that_compresses_easily.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_that_compresses_easily.rdb
deleted file mode 100644
index d8d3cffd06094da4efbeb6de98cfc0603b9f4998..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 73
(base85 binary patch data omitted)

diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_that_doesnt_compress.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_that_doesnt_compress.rdb
deleted file mode 100644
index b556aa59f7a20690eb80f613ee410bf858a8016d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 60
(base85 binary patch data omitted)

diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_with_big_values.rdb b/Godeps/_workspace/src/github.com/cupcake/rdb/fixtures/zipmap_with_big_values.rdb
deleted file mode 100644
index 89677fe1e791f21a30ebbdcf94e5cd01fc44a839..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 20923
(base85 binary patch data omitted)
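The fixtures removed above drive cupcake/rdb's streaming decoder through each on-disk RDB encoding (ziplist, zipmap, intset, and so on). For readers unfamiliar with how such a fixture is consumed, here is a minimal sketch following the usage pattern from the cupcake/rdb README: embed nopdecoder.NopDecoder so every rdb.Decoder callback gets a no-op default, then override only the hooks of interest. The fixture path and the print format are illustrative assumptions, not code from this repository.

package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

// printDecoder embeds nopdecoder.NopDecoder, which supplies no-op
// implementations of the full rdb.Decoder interface; only the callbacks
// we want to observe are overridden below.
type printDecoder struct {
	db int
	nopdecoder.NopDecoder
}

func (d *printDecoder) StartDatabase(n int) { d.db = n }

// Set fires once per plain string key decoded from the RDB stream.
func (d *printDecoder) Set(key, value []byte, expiry int64) {
	fmt.Printf("db=%d %q -> %q\n", d.db, key, value)
}

// Hset fires once per hash field, e.g. for the zipmap fixtures above.
func (d *printDecoder) Hset(key, field, value []byte) {
	fmt.Printf("db=%d %q.%q -> %q\n", d.db, key, field, value)
}

func main() {
	// Illustrative path: any of the .rdb fixtures deleted above would do.
	f, err := os.Open("fixtures/zipmap_with_big_values.rdb")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// rdb.Decode streams the file and fires one callback per decoded entry.
	if err := rdb.Decode(bufio.NewReader(f), &printDecoder{}); err != nil {
		panic(err)
	}
}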
diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_test.go b/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_test.go
deleted file mode 100644
index f840f96..0000000
--- a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2011 Evan Shaw. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -// These tests are adapted from gommap: http://labix.org/gommap -// Copyright (c) 2010, Gustavo Niemeyer - -package mmap - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -var testData = []byte("0123456789ABCDEF") -var testPath = filepath.Join(os.TempDir(), "testdata") - -func init() { - f := openFile(os.O_RDWR|os.O_CREATE) - f.Write(testData) - f.Close() -} - -func openFile(flags int) *os.File { - f, err := os.OpenFile(testPath, flags, 0644) - if err != nil { - panic(err.Error()) - } - return f -} - -func TestUnmap(t *testing.T) { - f := openFile(os.O_RDONLY) - defer f.Close() - mmap, err := Map(f, RDONLY, 0) - if err != nil { - t.Errorf("error mapping: %s", err) - } - if err := mmap.Unmap(); err != nil { - t.Errorf("error unmapping: %s", err) - } -} - -func TestReadWrite(t *testing.T) { - f := openFile(os.O_RDWR) - defer f.Close() - mmap, err := Map(f, RDWR, 0) - if err != nil { - t.Errorf("error mapping: %s", err) - } - defer mmap.Unmap() - if !bytes.Equal(testData, mmap) { - t.Errorf("mmap != testData: %q, %q", mmap, testData) - } - - mmap[9] = 'X' - mmap.Flush() - - fileData, err := ioutil.ReadAll(f) - if err != nil { - t.Errorf("error reading file: %s", err) - } - if !bytes.Equal(fileData, []byte("012345678XABCDEF")) { - t.Errorf("file wasn't modified") - } - - // leave things how we found them - mmap[9] = '9' - mmap.Flush() -} - -func TestProtFlagsAndErr(t *testing.T) { - f := openFile(os.O_RDONLY) - defer f.Close() - if _, err := Map(f, RDWR, 0); err == nil { - t.Errorf("expected error") - } -} - -func TestFlags(t *testing.T) { - f := openFile(os.O_RDWR) - defer f.Close() - mmap, err := Map(f, COPY, 0) - if err != nil { - t.Errorf("error mapping: %s", err) - } - defer mmap.Unmap() - - mmap[9] = 'X' - mmap.Flush() - - fileData, err := ioutil.ReadAll(f) - if err != nil { - t.Errorf("error reading file: %s", err) - } - if !bytes.Equal(fileData, testData) { - t.Errorf("file was modified") - } -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go deleted file mode 100644 index f8188f1..0000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdata = flag.String("testdata", "testdata", "Directory containing the test data") -) - -func roundtrip(b, ebuf, dbuf []byte) error { - d, err := Decode(dbuf, Encode(ebuf, b)) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(27354294)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestInvalidVarint(t *testing.T) { - data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00") - if _, err := DecodedLen(data); err != ErrCorrupt { - t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) - } - if _, err := Decode(nil, data); err != ErrCorrupt { - t.Errorf("Decode: got %v, want ErrCorrupt", err) - } - - // The encoded varint overflows 32 bits - data = []byte("\xff\xff\xff\xff\xff\x00") - - if _, err := DecodedLen(data); err != ErrCorrupt { - t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) - } - if _, err := Decode(nil, data); err != ErrCorrupt { - t.Errorf("Decode: got %v, want ErrCorrupt", err) - } -} - -func cmp(a, b []byte) error { - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func TestFramingFormat(t *testing.T) { - // src is comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxUncompressedChunkLen (64k). 
- src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestReaderReset(t *testing.T) { - gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000) - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(gold); err != nil { - t.Fatalf("Write: %v", err) - } - encoded, invalid, partial := buf.String(), "invalid", "partial" - r := NewReader(nil) - for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} { - if s == partial { - r.Reset(strings.NewReader(encoded)) - if _, err := r.Read(make([]byte, 101)); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - continue - } - r.Reset(strings.NewReader(s)) - got, err := ioutil.ReadAll(r) - switch s { - case encoded: - if err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - if err := cmp(got, gold); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - case invalid: - if err == nil { - t.Errorf("#%d: got nil error, want non-nil", i) - continue - } - } - } -} - -func TestWriterReset(t *testing.T) { - gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000) - var gots, wants [][]byte - const n = 20 - w, failed := NewWriter(nil), false - for i := 0; i <= n; i++ { - buf := new(bytes.Buffer) - w.Reset(buf) - want := gold[:len(gold)*i/n] - if _, err := w.Write(want); err != nil { - t.Errorf("#%d: Write: %v", i, err) - failed = true - continue - } - got, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Errorf("#%d: ReadAll: %v", i, err) - failed = true - continue - } - gots = append(gots, got) - wants = append(wants, want) - } - if failed { - return - } - for i := range gots { - if err := cmp(gots[i], wants[i]); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded := Encode(nil, src) - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b testing.TB, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Skipf("skipping benchmark: %v", err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. -func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. 
- data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "fireworks.jpeg"}, - {"jpg_200", "fireworks.jpeg"}, - {"pdf", "paper-100k.pdf"}, - {"html4", "html_x_4"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" - -func downloadTestdata(b *testing.B, basename string) (errRet error) { - filename := filepath.Join(*testdata, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - b.Skipf("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create testdata: %s", err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := baseURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - if err := downloadTestdata(b, testFiles[n].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - data := readFile(b, filepath.Join(*testdata, testFiles[n].filename)) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } diff --git a/Godeps/_workspace/src/github.com/peterh/liner/input_test.go b/Godeps/_workspace/src/github.com/peterh/liner/input_test.go deleted file mode 100644 index e515a48..0000000 --- a/Godeps/_workspace/src/github.com/peterh/liner/input_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build !windows - -package liner - -import ( - "bufio" - "bytes" - "testing" -) - -func (s *State) expectRune(t *testing.T, r rune) { - item, err := s.readNext() - if err != nil { - t.Fatalf("Expected rune '%c', got error %s\n", r, err) - } - if v, ok := item.(rune); !ok { - t.Fatalf("Expected rune '%c', got non-rune %v\n", r, v) - } else { - if v != r { - t.Fatalf("Expected rune '%c', got rune '%c'\n", r, v) - } - } -} - -func (s *State) expectAction(t *testing.T, a action) { - item, err := s.readNext() - if err != nil { - t.Fatalf("Expected Action %d, got error %s\n", a, err) - } - if v, ok := item.(action); !ok { - t.Fatalf("Expected Action %d, got non-Action %v\n", a, v) - } else { - if v != a { - t.Fatalf("Expected Action %d, got Action %d\n", a, v) - } - } -} - -func TestTypes(t *testing.T) { - input := []byte{'A', 27, 'B', 27, 91, 68, 27, '[', '1', ';', '5', 'D', 'e'} - var s State - s.r = bufio.NewReader(bytes.NewBuffer(input)) - - next := make(chan nexter) - go func() { - for { - var n nexter - n.r, _, n.err = s.r.ReadRune() - next <- n - } - }() - s.next = next - - s.expectRune(t, 'A') - s.expectRune(t, 27) - s.expectRune(t, 'B') - s.expectAction(t, left) - s.expectAction(t, wordLeft) - - s.expectRune(t, 'e') -} diff --git a/Godeps/_workspace/src/github.com/peterh/liner/line_test.go b/Godeps/_workspace/src/github.com/peterh/liner/line_test.go deleted file mode 100644 index 727da6c..0000000 --- a/Godeps/_workspace/src/github.com/peterh/liner/line_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package liner - -import ( - "bytes" - "strings" - "testing" -) - -func TestAppend(t *testing.T) { - var s State - s.AppendHistory("foo") - s.AppendHistory("bar") - - var out bytes.Buffer - num, err := 
s.WriteHistory(&out)
-	if err != nil {
-		t.Fatal("Unexpected error writing history", err)
-	}
-	if num != 2 {
-		t.Fatalf("Expected 2 history entries, got %d", num)
-	}
-
-	s.AppendHistory("baz")
-	num, err = s.WriteHistory(&out)
-	if err != nil {
-		t.Fatal("Unexpected error writing history", err)
-	}
-	if num != 3 {
-		t.Fatalf("Expected 3 history entries, got %d", num)
-	}
-
-	s.AppendHistory("baz")
-	num, err = s.WriteHistory(&out)
-	if err != nil {
-		t.Fatal("Unexpected error writing history", err)
-	}
-	if num != 3 {
-		t.Fatalf("Expected 3 history entries after duplicate append, got %d", num)
-	}
-
-	s.AppendHistory("baz")
-
-}
-
-func TestHistory(t *testing.T) {
-	input := `foo
-bar
-baz
-quux
-dingle`
-
-	var s State
-	num, err := s.ReadHistory(strings.NewReader(input))
-	if err != nil {
-		t.Fatal("Unexpected error reading history", err)
-	}
-	if num != 5 {
-		t.Fatal("Wrong number of history entries read")
-	}
-
-	var out bytes.Buffer
-	num, err = s.WriteHistory(&out)
-	if err != nil {
-		t.Fatal("Unexpected error writing history", err)
-	}
-	if num != 5 {
-		t.Fatal("Wrong number of history entries written")
-	}
-	if strings.TrimSpace(out.String()) != input {
-		t.Fatal("Round-trip failure")
-	}
-
-	// Test reading with a trailing newline present
-	var s2 State
-	num, err = s2.ReadHistory(&out)
-	if err != nil {
-		t.Fatal("Unexpected error reading history the 2nd time", err)
-	}
-	if num != 5 {
-		t.Fatal("Wrong number of history entries read the 2nd time")
-	}
-
-	num, err = s.ReadHistory(strings.NewReader(input + "\n\xff"))
-	if err == nil {
-		t.Fatal("Unexpected success reading corrupted history", err)
-	}
-	if num != 5 {
-		t.Fatal("Wrong number of history entries read the 3rd time")
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/peterh/liner/prefix_test.go b/Godeps/_workspace/src/github.com/peterh/liner/prefix_test.go
deleted file mode 100644
index c826d6c..0000000
--- a/Godeps/_workspace/src/github.com/peterh/liner/prefix_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build windows linux darwin openbsd freebsd netbsd
-
-package liner
-
-import "testing"
-
-type testItem struct {
-	list   []string
-	prefix string
-}
-
-func TestPrefix(t *testing.T) {
-	list := []testItem{
-		{[]string{"food", "foot"}, "foo"},
-		{[]string{"foo", "foot"}, "foo"},
-		{[]string{"food", "foo"}, "foo"},
-		{[]string{"food", "foe", "foot"}, "fo"},
-		{[]string{"food", "foot", "barbeque"}, ""},
-		{[]string{"cafeteria", "café"}, "caf"},
-		{[]string{"cafe", "café"}, "caf"},
-		{[]string{"cafè", "café"}, "caf"},
-		{[]string{"cafés", "café"}, "café"},
-		{[]string{"áéíóú", "áéíóú"}, "áéíóú"},
-		{[]string{"éclairs", "éclairs"}, "éclairs"},
-		{[]string{"éclairs are the best", "éclairs are great", "éclairs"}, "éclairs"},
-		{[]string{"éclair", "éclairs"}, "éclair"},
-		{[]string{"éclairs", "éclair"}, "éclair"},
-		{[]string{"éclair", "élan"}, "é"},
-	}
-
-	for _, test := range list {
-		lcp := longestCommonPrefix(test.list)
-		if lcp != test.prefix {
-			t.Errorf("%s != %s for %+v", lcp, test.prefix, test.list)
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/peterh/liner/race_test.go b/Godeps/_workspace/src/github.com/peterh/liner/race_test.go
deleted file mode 100644
index e320849..0000000
--- a/Godeps/_workspace/src/github.com/peterh/liner/race_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build race

-package liner
-
-import (
-	"io/ioutil"
-	"os"
-	"sync"
-	"testing"
-)
-
-func TestWriteHistory(t *testing.T) {
-	oldout := os.Stdout
-	defer func() { os.Stdout = oldout }()
-	oldin := os.Stdin
-	defer func() { os.Stdin = oldin }()
-
-	newinr, newinw, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
-	os.Stdin = newinr
-	newoutr, newoutw, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer newoutr.Close()
-	os.Stdout = newoutw
-
-	var wait sync.WaitGroup
-	wait.Add(1)
-	s := NewLiner()
-	go func() {
-		s.AppendHistory("foo")
-		s.AppendHistory("bar")
-		s.Prompt("")
-		wait.Done()
-	}()
-
-	s.WriteHistory(ioutil.Discard)
-
-	newinw.Close()
-	wait.Wait()
-}
diff --git a/Godeps/_workspace/src/github.com/peterh/liner/width_test.go b/Godeps/_workspace/src/github.com/peterh/liner/width_test.go
deleted file mode 100644
index add779c..0000000
--- a/Godeps/_workspace/src/github.com/peterh/liner/width_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package liner
-
-import (
-	"strconv"
-	"testing"
-)
-
-func accent(in []rune) []rune {
-	var out []rune
-	for _, r := range in {
-		out = append(out, r)
-		out = append(out, '\u0301')
-	}
-	return out
-}
-
-type testCase struct {
-	s      []rune
-	glyphs int
-}
-
-var testCases = []testCase{
-	{[]rune("query"), 5},
-	{[]rune("私"), 2},
-	{[]rune("hello世界"), 9},
-}
-
-func TestCountGlyphs(t *testing.T) {
-	for _, testCase := range testCases {
-		count := countGlyphs(testCase.s)
-		if count != testCase.glyphs {
-			t.Errorf("ASCII count incorrect. %d != %d", count, testCase.glyphs)
-		}
-		count = countGlyphs(accent(testCase.s))
-		if count != testCase.glyphs {
-			t.Errorf("Accent count incorrect. %d != %d", count, testCase.glyphs)
-		}
-	}
-}
-
-func compare(a, b []rune, name string, t *testing.T) {
-	if len(a) != len(b) {
-		t.Errorf(`"%s" != "%s" in %s"`, string(a), string(b), name)
-		return
-	}
-	for i := range a {
-		if a[i] != b[i] {
-			t.Errorf(`"%s" != "%s" in %s"`, string(a), string(b), name)
-			return
-		}
-	}
-}
-
-func TestPrefixGlyphs(t *testing.T) {
-	for _, testCase := range testCases {
-		for i := 0; i <= len(testCase.s); i++ {
-			iter := strconv.Itoa(i)
-			out := getPrefixGlyphs(testCase.s, i)
-			compare(out, testCase.s[:i], "ascii prefix "+iter, t)
-			out = getPrefixGlyphs(accent(testCase.s), i)
-			compare(out, accent(testCase.s[:i]), "accent prefix "+iter, t)
-		}
-		out := getPrefixGlyphs(testCase.s, 999)
-		compare(out, testCase.s, "ascii prefix overflow", t)
-		out = getPrefixGlyphs(accent(testCase.s), 999)
-		compare(out, accent(testCase.s), "accent prefix overflow", t)
-
-		out = getPrefixGlyphs(testCase.s, -3)
-		if len(out) != 0 {
-			t.Error("ascii prefix negative")
-		}
-		out = getPrefixGlyphs(accent(testCase.s), -3)
-		if len(out) != 0 {
-			t.Error("accent prefix negative")
-		}
-	}
-}
-
-func TestSuffixGlyphs(t *testing.T) {
-	for _, testCase := range testCases {
-		for i := 0; i <= len(testCase.s); i++ {
-			iter := strconv.Itoa(i)
-			out := getSuffixGlyphs(testCase.s, i)
-			compare(out, testCase.s[len(testCase.s)-i:], "ascii suffix "+iter, t)
-			out = getSuffixGlyphs(accent(testCase.s), i)
-			compare(out, accent(testCase.s[len(testCase.s)-i:]), "accent suffix "+iter, t)
-		}
-		out := getSuffixGlyphs(testCase.s, 999)
-		compare(out, testCase.s, "ascii suffix overflow", t)
-		out = getSuffixGlyphs(accent(testCase.s), 999)
-		compare(out, accent(testCase.s), "accent suffix overflow", t)
-
-		out = getSuffixGlyphs(testCase.s, -3)
-		if len(out) != 0 {
-			t.Error("ascii suffix negative")
-		}
-		out = getSuffixGlyphs(accent(testCase.s), -3)
-		if len(out) != 0 {
-			t.Error("accent suffix negative")
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/siddontang/go/bson/bson_test.go b/Godeps/_workspace/src/github.com/siddontang/go/bson/bson_test.go
deleted file mode 100644
index 
3d97998..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/bson/bson_test.go +++ /dev/null @@ -1,1472 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. - -package bson_test - -import ( - "encoding/binary" - "encoding/json" - "errors" - "net/url" - "reflect" - "testing" - "time" - - . "gopkg.in/check.v1" - "gopkg.in/mgo.v2/bson" -) - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct{} - -var _ = Suite(&S{}) - -// Wrap up the document elements contained in data, prepending the int32 -// length of the data, and appending the '\x00' value closing the document. 
-func wrapInDoc(data string) string { - result := make([]byte, len(data)+5) - binary.LittleEndian.PutUint32(result, uint32(len(result))) - copy(result[4:], []byte(data)) - return string(result) -} - -func makeZeroDoc(value interface{}) (zero interface{}) { - v := reflect.ValueOf(value) - t := v.Type() - switch t.Kind() { - case reflect.Map: - mv := reflect.MakeMap(t) - zero = mv.Interface() - case reflect.Ptr: - pv := reflect.New(v.Type().Elem()) - zero = pv.Interface() - case reflect.Slice: - zero = reflect.New(t).Interface() - default: - panic("unsupported doc type") - } - return zero -} - -func testUnmarshal(c *C, data string, obj interface{}) { - zero := makeZeroDoc(obj) - err := bson.Unmarshal([]byte(data), zero) - c.Assert(err, IsNil) - c.Assert(zero, DeepEquals, obj) -} - -type testItemType struct { - obj interface{} - data string -} - -// -------------------------------------------------------------------------- -// Samples from bsonspec.org: - -var sampleItems = []testItemType{ - {bson.M{"hello": "world"}, - "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, - {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}}, - "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + - "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, -} - -func (s *S) TestMarshalSampleItems(c *C) { - for i, item := range sampleItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalSampleItems(c *C) { - for i, item := range sampleItems { - value := bson.M{} - err := bson.Unmarshal([]byte(item.data), value) - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i)) - } -} - -// -------------------------------------------------------------------------- -// Every type, ordered by the type flag. These are not wrapped with the -// length and last \x00 from the document. wrapInDoc() computes them. -// Note that all of them should be supported as two-way conversions. - -var allItems = []testItemType{ - {bson.M{}, - ""}, - {bson.M{"_": float64(5.05)}, - "\x01_\x00333333\x14@"}, - {bson.M{"_": "yo"}, - "\x02_\x00\x03\x00\x00\x00yo\x00"}, - {bson.M{"_": bson.M{"a": true}}, - "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"}, - {bson.M{"_": []interface{}{true, false}}, - "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - {bson.M{"_": []byte("yo")}, - "\x05_\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, - "\x05_\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. - "\x06_\x00"}, - {bson.M{"_": bson.ObjectId("0123456789ab")}, - "\x07_\x000123456789ab"}, - {bson.M{"_": false}, - "\x08_\x00\x00"}, - {bson.M{"_": true}, - "\x08_\x00\x01"}, - {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. 
- "\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": nil}, - "\x0A_\x00"}, - {bson.M{"_": bson.RegEx{"ab", "cd"}}, - "\x0B_\x00ab\x00cd\x00"}, - {bson.M{"_": bson.JavaScript{"code", nil}}, - "\x0D_\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"_": bson.Symbol("sym")}, - "\x0E_\x00\x04\x00\x00\x00sym\x00"}, - {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, - "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + - "\x07\x00\x00\x00\x0A\x00\x00"}, - {bson.M{"_": 258}, - "\x10_\x00\x02\x01\x00\x00"}, - {bson.M{"_": bson.MongoTimestamp(258)}, - "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": int64(258)}, - "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": int64(258 << 32)}, - "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, - {bson.M{"_": bson.MaxKey}, - "\x7F_\x00"}, - {bson.M{"_": bson.MinKey}, - "\xFF_\x00"}, -} - -func (s *S) TestMarshalAllItems(c *C) { - for i, item := range allItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalAllItems(c *C) { - for i, item := range allItems { - value := bson.M{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value) - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawAllItems(c *C) { - for i, item := range allItems { - if len(item.data) == 0 { - continue - } - value := item.obj.(bson.M)["_"] - if value == nil { - continue - } - pv := reflect.New(reflect.ValueOf(value).Type()) - raw := bson.Raw{item.data[0], []byte(item.data[3:])} - c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) - err := raw.Unmarshal(pv.Interface()) - c.Assert(err, IsNil) - c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawIncompatible(c *C) { - raw := bson.Raw{0x08, []byte{0x01}} // true - err := raw.Unmarshal(&struct{}{}) - c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}") -} - -func (s *S) TestUnmarshalZeroesStruct(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - type T struct{ A, B int } - v := T{A: 1} - err = bson.Unmarshal(data, &v) - c.Assert(err, IsNil) - c.Assert(v.A, Equals, 0) - c.Assert(v.B, Equals, 2) -} - -func (s *S) TestUnmarshalZeroesMap(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - m := bson.M{"a": 1} - err = bson.Unmarshal(data, &m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, bson.M{"b": 2}) -} - -func (s *S) TestUnmarshalNonNilInterface(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - m := bson.M{"a": 1} - var i interface{} - i = m - err = bson.Unmarshal(data, &i) - c.Assert(err, IsNil) - c.Assert(i, DeepEquals, bson.M{"b": 2}) - c.Assert(m, DeepEquals, bson.M{"a": 1}) -} - -// -------------------------------------------------------------------------- -// Some one way marshaling operations which would unmarshal differently. - -var oneWayMarshalItems = []testItemType{ - // These are being passed as pointers, and will unmarshal as values. 
- {bson.M{"": &bson.Binary{0x02, []byte("old")}}, - "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, - "\x05\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"": &bson.RegEx{"ab", "cd"}}, - "\x0B\x00ab\x00cd\x00"}, - {bson.M{"": &bson.JavaScript{"code", nil}}, - "\x0D\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, - "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + - "\x07\x00\x00\x00\x0A\x00\x00"}, - - // There's no float32 type in BSON. Will encode as a float64. - {bson.M{"": float32(5.05)}, - "\x01\x00\x00\x00\x00@33\x14@"}, - - // The array will be unmarshaled as a slice instead. - {bson.M{"": [2]bool{true, false}}, - "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - - // The typed slice will be unmarshaled as []interface{}. - {bson.M{"": []bool{true, false}}, - "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - - // Will unmarshal as a []byte. - {bson.M{"": bson.Binary{0x00, []byte("yo")}}, - "\x05\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"": bson.Binary{0x02, []byte("old")}}, - "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - - // No way to preserve the type information here. We might encode as a zero - // value, but this would mean that pointer values in structs wouldn't be - // able to correctly distinguish between unset and set to the zero value. - {bson.M{"": (*byte)(nil)}, - "\x0A\x00"}, - - // No int types smaller than int32 in BSON. Could encode this as a char, - // but it would still be ambiguous, take more, and be awkward in Go when - // loaded without typing information. - {bson.M{"": byte(8)}, - "\x10\x00\x08\x00\x00\x00"}, - - // There are no unsigned types in BSON. Will unmarshal as int32 or int64. - {bson.M{"": uint32(258)}, - "\x10\x00\x02\x01\x00\x00"}, - {bson.M{"": uint64(258)}, - "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"": uint64(258 << 32)}, - "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, - - // This will unmarshal as int. - {bson.M{"": int32(258)}, - "\x10\x00\x02\x01\x00\x00"}, - - // That's a special case. The unsigned value is too large for an int32, - // so an int64 is used instead. - {bson.M{"": uint32(1<<32 - 1)}, - "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, - {bson.M{"": uint(1<<32 - 1)}, - "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, -} - -func (s *S) TestOneWayMarshalItems(c *C) { - for i, item := range oneWayMarshalItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item %d", i)) - } -} - -// -------------------------------------------------------------------------- -// Two-way tests for user-defined structures using the samples -// from bsonspec.org. 
- -type specSample1 struct { - Hello string -} - -type specSample2 struct { - BSON []interface{} "BSON" -} - -var structSampleItems = []testItemType{ - {&specSample1{"world"}, - "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, - {&specSample2{[]interface{}{"awesome", float64(5.05), 1986}}, - "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + - "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, -} - -func (s *S) TestMarshalStructSampleItems(c *C) { - for i, item := range structSampleItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data, - Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalStructSampleItems(c *C) { - for _, item := range structSampleItems { - testUnmarshal(c, item.data, item.obj) - } -} - -func (s *S) Test64bitInt(c *C) { - var i int64 = (1 << 31) - if int(i) > 0 { - data, err := bson.Marshal(bson.M{"i": int(i)}) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00")) - - var result struct{ I int } - err = bson.Unmarshal(data, &result) - c.Assert(err, IsNil) - c.Assert(int64(result.I), Equals, i) - } -} - -// -------------------------------------------------------------------------- -// Generic two-way struct marshaling tests. - -var bytevar = byte(8) -var byteptr = &bytevar - -var structItems = []testItemType{ - {&struct{ Ptr *byte }{nil}, - "\x0Aptr\x00"}, - {&struct{ Ptr *byte }{&bytevar}, - "\x10ptr\x00\x08\x00\x00\x00"}, - {&struct{ Ptr **byte }{&byteptr}, - "\x10ptr\x00\x08\x00\x00\x00"}, - {&struct{ Byte byte }{8}, - "\x10byte\x00\x08\x00\x00\x00"}, - {&struct{ Byte byte }{0}, - "\x10byte\x00\x00\x00\x00\x00"}, - {&struct { - V byte "Tag" - }{8}, - "\x10Tag\x00\x08\x00\x00\x00"}, - {&struct { - V *struct { - Byte byte - } - }{&struct{ Byte byte }{8}}, - "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ priv byte }{}, ""}, - - // The order of the dumped fields should be the same in the struct. - {&struct{ A, C, B, D, F, E *byte }{}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"}, - - {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, - "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}}, - "\x10v\x00" + "\x00\x00\x00\x00"}, - - // Byte arrays. - {&struct{ V [2]byte }{[2]byte{'y', 'o'}}, - "\x05v\x00\x02\x00\x00\x00\x00yo"}, -} - -func (s *S) TestMarshalStructItems(c *C) { - for i, item := range structItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalStructItems(c *C) { - for _, item := range structItems { - testUnmarshal(c, wrapInDoc(item.data), item.obj) - } -} - -func (s *S) TestUnmarshalRawStructItems(c *C) { - for i, item := range structItems { - raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))} - zero := makeZeroDoc(item.obj) - err := raw.Unmarshal(zero) - c.Assert(err, IsNil) - c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawNil(c *C) { - // Regression test: shouldn't try to nil out the pointer itself, - // as it's not settable. 
- raw := bson.Raw{0x0A, []byte{}} - err := raw.Unmarshal(&struct{}{}) - c.Assert(err, IsNil) -} - -// -------------------------------------------------------------------------- -// One-way marshaling tests. - -type dOnIface struct { - D interface{} -} - -type ignoreField struct { - Before string - Ignore string `bson:"-"` - After string -} - -var marshalItems = []testItemType{ - // Ordered document dump. Will unmarshal as a dictionary by default. - {bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - - {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, - "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, - "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")}, - - {&ignoreField{"before", "ignore", "after"}, - "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"}, - - // Marshalling a Raw document does nothing. - {bson.Raw{0x03, []byte(wrapInDoc("anything"))}, - "anything"}, - {bson.Raw{Data: []byte(wrapInDoc("anything"))}, - "anything"}, -} - -func (s *S) TestMarshalOneWayItems(c *C) { - for _, item := range marshalItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data)) - } -} - -// -------------------------------------------------------------------------- -// One-way unmarshaling tests. - -var unmarshalItems = []testItemType{ - // Field is private. Should not attempt to unmarshal it. - {&struct{ priv byte }{}, - "\x10priv\x00\x08\x00\x00\x00"}, - - // Wrong casing. Field names are lowercased. - {&struct{ Byte byte }{}, - "\x10Byte\x00\x08\x00\x00\x00"}, - - // Ignore non-existing field. - {&struct{ Byte byte }{9}, - "\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"}, - - // Do not unmarshal on ignored field. - {&ignoreField{"before", "", "after"}, - "\x02before\x00\a\x00\x00\x00before\x00" + - "\x02-\x00\a\x00\x00\x00ignore\x00" + - "\x02after\x00\x06\x00\x00\x00after\x00"}, - - // Ignore unsuitable types silently. - {map[string]string{"str": "s"}, - "\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"}, - {map[string][]int{"array": []int{5, 9}}, - "\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")}, - - // Wrong type. Shouldn't init pointer. - {&struct{ Str *byte }{}, - "\x02str\x00\x02\x00\x00\x00s\x00"}, - {&struct{ Str *struct{ Str string } }{}, - "\x02str\x00\x02\x00\x00\x00s\x00"}, - - // Ordered document. - {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - - // Raw document. - {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, - "\x10byte\x00\x08\x00\x00\x00"}, - - // RawD document. 
- {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, - "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")}, - - // Decode old binary. - {bson.M{"_": []byte("old")}, - "\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - - // Decode old binary without length. According to the spec, this shouldn't happen. - {bson.M{"_": []byte("old")}, - "\x05_\x00\x03\x00\x00\x00\x02old"}, -} - -func (s *S) TestUnmarshalOneWayItems(c *C) { - for _, item := range unmarshalItems { - testUnmarshal(c, wrapInDoc(item.data), item.obj) - } -} - -func (s *S) TestUnmarshalNilInStruct(c *C) { - // Nil is the default value, so we need to ensure it's indeed being set. - b := byte(1) - v := &struct{ Ptr *byte }{&b} - err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil}) -} - -// -------------------------------------------------------------------------- -// Marshalling error cases. - -type structWithDupKeys struct { - Name byte - Other byte "name" // Tag should precede. -} - -var marshalErrorItems = []testItemType{ - {bson.M{"": uint64(1 << 63)}, - "BSON has no uint64 type, and value is too large to fit correctly in an int64"}, - {bson.M{"": bson.ObjectId("tooshort")}, - "ObjectIDs must be exactly 12 bytes long \\(got 8\\)"}, - {int64(123), - "Can't marshal int64 as a BSON document"}, - {bson.M{"": 1i}, - "Can't marshal complex128 in a BSON document"}, - {&structWithDupKeys{}, - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - {bson.Raw{0x0A, []byte{}}, - "Attempted to unmarshal Raw kind 10 as a document"}, - {&inlineCantPtr{&struct{ A, B int }{1, 2}}, - "Option ,inline needs a struct value or map field"}, - {&inlineDupName{1, struct{ A, B int }{2, 3}}, - "Duplicated key 'a' in struct bson_test.inlineDupName"}, - {&inlineDupMap{}, - "Multiple ,inline maps in struct bson_test.inlineDupMap"}, - {&inlineBadKeyMap{}, - "Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"}, - {&inlineMap{A: 1, M: map[string]interface{}{"a": 1}}, - `Can't have key "a" in inlined map; conflicts with struct field`}, -} - -func (s *S) TestMarshalErrorItems(c *C) { - for _, item := range marshalErrorItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, ErrorMatches, item.data) - c.Assert(data, IsNil) - } -} - -// -------------------------------------------------------------------------- -// Unmarshalling error cases. - -type unmarshalErrorType struct { - obj interface{} - data string - error string -} - -var unmarshalErrorItems = []unmarshalErrorType{ - // Tag name conflicts with existing parameter. - {&structWithDupKeys{}, - "\x10name\x00\x08\x00\x00\x00", - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - - // Non-string map key. - {map[int]interface{}{}, - "\x10name\x00\x08\x00\x00\x00", - "BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"}, - - {nil, - "\xEEname\x00", - "Unknown element kind \\(0xEE\\)"}, - - {struct{ Name bool }{}, - "\x10name\x00\x08\x00\x00\x00", - "Unmarshal can't deal with struct values. 
Use a pointer."}, - - {123, - "\x10name\x00\x08\x00\x00\x00", - "Unmarshal needs a map or a pointer to a struct."}, -} - -func (s *S) TestUnmarshalErrorItems(c *C) { - for _, item := range unmarshalErrorItems { - data := []byte(wrapInDoc(item.data)) - var value interface{} - switch reflect.ValueOf(item.obj).Kind() { - case reflect.Map, reflect.Ptr: - value = makeZeroDoc(item.obj) - case reflect.Invalid: - value = bson.M{} - default: - value = item.obj - } - err := bson.Unmarshal(data, value) - c.Assert(err, ErrorMatches, item.error) - } -} - -type unmarshalRawErrorType struct { - obj interface{} - raw bson.Raw - error string -} - -var unmarshalRawErrorItems = []unmarshalRawErrorType{ - // Tag name conflicts with existing parameter. - {&structWithDupKeys{}, - bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")}, - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - - {&struct{}{}, - bson.Raw{0xEE, []byte{}}, - "Unknown element kind \\(0xEE\\)"}, - - {struct{ Name bool }{}, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal can't deal with struct values. Use a pointer."}, - - {123, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal needs a map or a valid pointer."}, -} - -func (s *S) TestUnmarshalRawErrorItems(c *C) { - for i, item := range unmarshalRawErrorItems { - err := item.raw.Unmarshal(item.obj) - c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item)) - } -} - -var corruptedData = []string{ - "\x04\x00\x00\x00\x00", // Shorter than minimum - "\x06\x00\x00\x00\x00", // Not enough data - "\x05\x00\x00", // Broken length - "\x05\x00\x00\x00\xff", // Corrupted termination - "\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string - - // Array end past end of string (s[2]=0x07 is correct) - wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"), - - // Array end within string, but past acceptable. - wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"), - - // Document end within string, but past acceptable. - wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"), - - // String with corrupted end. - wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), -} - -func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { - for _, data := range corruptedData { - err := bson.Unmarshal([]byte(data), bson.M{}) - c.Assert(err, ErrorMatches, "Document is corrupted") - - err = bson.Unmarshal([]byte(data), &struct{}{}) - c.Assert(err, ErrorMatches, "Document is corrupted") - } -} - -// -------------------------------------------------------------------------- -// Setter test cases. - -var setterResult = map[string]error{} - -type setterType struct { - received interface{} -} - -func (o *setterType) SetBSON(raw bson.Raw) error { - err := raw.Unmarshal(&o.received) - if err != nil { - panic("The panic:" + err.Error()) - } - if s, ok := o.received.(string); ok { - if result, ok := setterResult[s]; ok { - return result - } - } - return nil -} - -type ptrSetterDoc struct { - Field *setterType "_" -} - -type valSetterDoc struct { - Field setterType "_" -} - -func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { - for _, item := range allItems { - for i := 0; i != 2; i++ { - var field *setterType - if i == 0 { - obj := &ptrSetterDoc{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) - c.Assert(err, IsNil) - field = obj.Field - } else { - obj := &valSetterDoc{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) - c.Assert(err, IsNil) - field = &obj.Field - } - if item.data == "" { - // Nothing to unmarshal. Should be untouched. 
- if i == 0 { - c.Assert(field, IsNil) - } else { - c.Assert(field.received, IsNil) - } - } else { - expected := item.obj.(bson.M)["_"] - c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected)) - c.Assert(field.received, DeepEquals, expected) - } - } - } -} - -func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { - obj := &setterType{} - err := bson.Unmarshal([]byte(sampleItems[0].data), obj) - c.Assert(err, IsNil) - c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"}) -} - -func (s *S) TestUnmarshalSetterOmits(c *C) { - setterResult["2"] = &bson.TypeError{} - setterResult["4"] = &bson.TypeError{} - defer func() { - delete(setterResult, "2") - delete(setterResult, "4") - }() - - m := map[string]*setterType{} - data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + - "\x02def\x00\x02\x00\x00\x002\x00" + - "\x02ghi\x00\x02\x00\x00\x003\x00" + - "\x02jkl\x00\x02\x00\x00\x004\x00") - err := bson.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], NotNil) - c.Assert(m["jkl"], IsNil) - - c.Assert(m["abc"].received, Equals, "1") - c.Assert(m["ghi"].received, Equals, "3") -} - -func (s *S) TestUnmarshalSetterErrors(c *C) { - boom := errors.New("BOOM") - setterResult["2"] = boom - defer delete(setterResult, "2") - - m := map[string]*setterType{} - data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + - "\x02def\x00\x02\x00\x00\x002\x00" + - "\x02ghi\x00\x02\x00\x00\x003\x00") - err := bson.Unmarshal([]byte(data), m) - c.Assert(err, Equals, boom) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], IsNil) - - c.Assert(m["abc"].received, Equals, "1") -} - -func (s *S) TestDMap(c *C) { - d := bson.D{{"a", 1}, {"b", 2}} - c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) -} - -func (s *S) TestUnmarshalSetterSetZero(c *C) { - setterResult["foo"] = bson.SetZero - defer delete(setterResult, "field") - - data, err := bson.Marshal(bson.M{"field": "foo"}) - c.Assert(err, IsNil) - - m := map[string]*setterType{} - err = bson.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - - value, ok := m["field"] - c.Assert(ok, Equals, true) - c.Assert(value, IsNil) -} - -// -------------------------------------------------------------------------- -// Getter test cases. 
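The getter tests that follow exercise the marshaling hook where Marshal consults a value's GetBSON method and serializes the returned value in its place. A minimal sketch of that hook, assuming the gopkg.in/mgo.v2/bson import path and an illustrative celsius type (both hypothetical here, not taken from this patch):

    package main

    import (
    	"fmt"

    	"gopkg.in/mgo.v2/bson" // import path assumed for illustration
    )

    // celsius is an illustrative type: GetBSON tells Marshal to encode
    // the returned value (a plain float64) in place of the original.
    type celsius float64

    func (c celsius) GetBSON() (interface{}, error) {
    	return float64(c), nil
    }

    func main() {
    	data, err := bson.Marshal(bson.M{"temp": celsius(21.5)})
    	if err != nil {
    		panic(err)
    	}
    	m := bson.M{}
    	if err := bson.Unmarshal(data, m); err != nil {
    		panic(err)
    	}
    	fmt.Println(m["temp"]) // 21.5, decoded as an ordinary float64
    }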
- -type typeWithGetter struct { - result interface{} - err error -} - -func (t *typeWithGetter) GetBSON() (interface{}, error) { - if t == nil { - return "", nil - } - return t.result, t.err -} - -type docWithGetterField struct { - Field *typeWithGetter "_" -} - -func (s *S) TestMarshalAllItemsWithGetter(c *C) { - for i, item := range allItems { - if item.data == "" { - continue - } - obj := &docWithGetterField{} - obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item #%d", i)) - } -} - -func (s *S) TestMarshalWholeDocumentWithGetter(c *C) { - obj := &typeWithGetter{result: sampleItems[0].obj} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, sampleItems[0].data) -} - -func (s *S) TestGetterErrors(c *C) { - e := errors.New("oops") - - obj1 := &docWithGetterField{} - obj1.Field = &typeWithGetter{sampleItems[0].obj, e} - data, err := bson.Marshal(obj1) - c.Assert(err, ErrorMatches, "oops") - c.Assert(data, IsNil) - - obj2 := &typeWithGetter{sampleItems[0].obj, e} - data, err = bson.Marshal(obj2) - c.Assert(err, ErrorMatches, "oops") - c.Assert(data, IsNil) -} - -type intGetter int64 - -func (t intGetter) GetBSON() (interface{}, error) { - return int64(t), nil -} - -type typeWithIntGetter struct { - V intGetter ",minsize" -} - -func (s *S) TestMarshalShortWithGetter(c *C) { - obj := typeWithIntGetter{42} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - m := bson.M{} - err = bson.Unmarshal(data, m) - c.Assert(err, IsNil) - c.Assert(m["v"], Equals, 42) -} - -func (s *S) TestMarshalWithGetterNil(c *C) { - obj := docWithGetterField{} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - m := bson.M{} - err = bson.Unmarshal(data, m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, bson.M{"_": ""}) -} - -// -------------------------------------------------------------------------- -// Cross-type conversion tests. 
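The cross-type items that follow pair two Go shapes that should survive a dump/load round trip. A minimal sketch of the idea, under the same gopkg.in/mgo.v2/bson import assumption:

    package main

    import (
    	"fmt"

    	"gopkg.in/mgo.v2/bson" // import path assumed for illustration
    )

    func main() {
    	// Dump from one shape...
    	data, err := bson.Marshal(&struct{ N int32 }{42})
    	if err != nil {
    		panic(err)
    	}
    	// ...and load into another. Field names are lowercased on the
    	// wire, and the stored int32 is widened into the int64 target.
    	var out struct{ N int64 }
    	if err := bson.Unmarshal(data, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.N) // 42
    }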
- -type crossTypeItem struct { - obj1 interface{} - obj2 interface{} -} - -type condStr struct { - V string ",omitempty" -} -type condStrNS struct { - V string `a:"A" bson:",omitempty" b:"B"` -} -type condBool struct { - V bool ",omitempty" -} -type condInt struct { - V int ",omitempty" -} -type condUInt struct { - V uint ",omitempty" -} -type condFloat struct { - V float64 ",omitempty" -} -type condIface struct { - V interface{} ",omitempty" -} -type condPtr struct { - V *bool ",omitempty" -} -type condSlice struct { - V []string ",omitempty" -} -type condMap struct { - V map[string]int ",omitempty" -} -type namedCondStr struct { - V string "myv,omitempty" -} -type condTime struct { - V time.Time ",omitempty" -} -type condStruct struct { - V struct{ A []int } ",omitempty" -} - -type shortInt struct { - V int64 ",minsize" -} -type shortUint struct { - V uint64 ",minsize" -} -type shortIface struct { - V interface{} ",minsize" -} -type shortPtr struct { - V *int64 ",minsize" -} -type shortNonEmptyInt struct { - V int64 ",minsize,omitempty" -} - -type inlineInt struct { - V struct{ A, B int } ",inline" -} -type inlineCantPtr struct { - V *struct{ A, B int } ",inline" -} -type inlineDupName struct { - A int - V struct{ A, B int } ",inline" -} -type inlineMap struct { - A int - M map[string]interface{} ",inline" -} -type inlineMapInt struct { - A int - M map[string]int ",inline" -} -type inlineMapMyM struct { - A int - M MyM ",inline" -} -type inlineDupMap struct { - M1 map[string]interface{} ",inline" - M2 map[string]interface{} ",inline" -} -type inlineBadKeyMap struct { - M map[int]int ",inline" -} - -type ( - MyString string - MyBytes []byte - MyBool bool - MyD []bson.DocElem - MyRawD []bson.RawDocElem - MyM map[string]interface{} -) - -var ( - truevar = true - falsevar = false - - int64var = int64(42) - int64ptr = &int64var - intvar = int(42) - intptr = &intvar -) - -func parseURL(s string) *url.URL { - u, err := url.Parse(s) - if err != nil { - panic(err) - } - return u -} - -// That's a pretty fun test. It will dump the first item, generate a zero -// value equivalent to the second one, load the dumped data onto it, and then -// verify that the resulting value is deep-equal to the untouched second value. -// Then, it will do the same in the *opposite* direction! 
-var twoWayCrossItems = []crossTypeItem{ - // int<=>int - {&struct{ I int }{42}, &struct{ I int8 }{42}}, - {&struct{ I int }{42}, &struct{ I int32 }{42}}, - {&struct{ I int }{42}, &struct{ I int64 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I int32 }{42}, &struct{ I int64 }{42}}, - - // uint<=>uint - {&struct{ I uint }{42}, &struct{ I uint8 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, - - // float32<=>float64 - {&struct{ I float32 }{42}, &struct{ I float64 }{42}}, - - // int<=>uint - {&struct{ I uint }{42}, &struct{ I int }{42}}, - {&struct{ I uint }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, - - // int <=> float - {&struct{ I int }{42}, &struct{ I float64 }{42}}, - - // int <=> bool - {&struct{ I int }{1}, &struct{ I bool }{true}}, - {&struct{ I int }{0}, &struct{ I bool }{false}}, - - // uint <=> float64 - {&struct{ I uint }{42}, &struct{ I float64 }{42}}, - - // uint <=> bool - {&struct{ I uint }{1}, &struct{ I bool }{true}}, - {&struct{ I uint }{0}, &struct{ I bool }{false}}, - - // float64 <=> bool - {&struct{ I float64 }{1}, &struct{ I bool }{true}}, - {&struct{ I float64 }{0}, &struct{ I bool }{false}}, - - // string <=> string and string <=> []byte - {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, - {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, - {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, - - // map <=> struct - {&struct { - A struct { - B, C int - } - }{struct{ B, C int }{1, 2}}, - map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, - - {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, - {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, - {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, - {&struct{ A uint }{42}, map[string]int{"a": 42}}, - {&struct{ A uint }{42}, map[string]float64{"a": 42}}, - {&struct{ A uint }{1}, map[string]bool{"a": true}}, - {&struct{ A int }{42}, map[string]uint{"a": 42}}, - {&struct{ A int }{42}, map[string]float64{"a": 42}}, - {&struct{ A int }{1}, map[string]bool{"a": true}}, - {&struct{ A float64 }{42}, map[string]float32{"a": 42}}, - {&struct{ A float64 }{42}, map[string]int{"a": 42}}, - {&struct{ A float64 }{42}, map[string]uint{"a": 42}}, - {&struct{ A float64 }{1}, map[string]bool{"a": true}}, - {&struct{ A bool }{true}, map[string]int{"a": 1}}, - {&struct{ A bool }{true}, map[string]uint{"a": 1}}, - {&struct{ A bool }{true}, map[string]float64{"a": 1}}, - {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, - - // 
url.URL <=> string - {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - - // Slices - {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - - // Conditionals - {&condBool{true}, map[string]bool{"v": true}}, - {&condBool{}, map[string]bool{}}, - {&condInt{1}, map[string]int{"v": 1}}, - {&condInt{}, map[string]int{}}, - {&condUInt{1}, map[string]uint{"v": 1}}, - {&condUInt{}, map[string]uint{}}, - {&condFloat{}, map[string]int{}}, - {&condStr{"yo"}, map[string]string{"v": "yo"}}, - {&condStr{}, map[string]string{}}, - {&condStrNS{"yo"}, map[string]string{"v": "yo"}}, - {&condStrNS{}, map[string]string{}}, - {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, - {&condSlice{}, map[string][]string{}}, - {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, - {&condMap{}, map[string][]string{}}, - {&condIface{"yo"}, map[string]string{"v": "yo"}}, - {&condIface{""}, map[string]string{"v": ""}}, - {&condIface{}, map[string]string{}}, - {&condPtr{&truevar}, map[string]bool{"v": true}}, - {&condPtr{&falsevar}, map[string]bool{"v": false}}, - {&condPtr{}, map[string]string{}}, - - {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, - {&condTime{}, map[string]string{}}, - - {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, - {&condStruct{struct{ A []int }{}}, bson.M{}}, - - {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, - {&namedCondStr{}, map[string]string{}}, - - {&shortInt{1}, map[string]interface{}{"v": 1}}, - {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, - - {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, - {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortNonEmptyInt{}, map[string]interface{}{}}, - - {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}}, - {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, - {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, - {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, - - // []byte <=> MyBytes - {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, - {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, - {&struct{ B MyBytes }{}, map[string]bool{}}, - {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, - - // bool <=> MyBool - {&struct{ B MyBool }{true}, map[string]bool{"b": true}}, - {&struct{ B MyBool }{}, map[string]bool{"b": false}}, - {&struct{ B MyBool }{}, map[string]string{}}, - {&struct{ B bool }{}, map[string]MyBool{"b": false}}, - - // arrays - {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, - - // zero time - {&struct{ V time.Time }{}, 
map[string]interface{}{"v": time.Time{}}}, - - // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds - {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, - map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, - - // bson.D <=> []DocElem - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, - - // bson.RawD <=> []RawDocElem - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - - // bson.M <=> map - {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, - - // bson.M <=> map[MyString] - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, - - // json.Number <=> int64, float64 - {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, - {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, - {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, -} - -// Same thing, but only one way (obj1 => obj2). -var oneWayCrossItems = []crossTypeItem{ - // map <=> struct - {map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}}, - - // inline map elides badly typed values - {map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}}, - - // Can't decode int into struct. - {bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}}, - - // Would get decoded into a int32 too in the opposite direction. - {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, -} - -func testCrossPair(c *C, dump interface{}, load interface{}) { - c.Logf("Dump: %#v", dump) - c.Logf("Load: %#v", load) - zero := makeZeroDoc(load) - data, err := bson.Marshal(dump) - c.Assert(err, IsNil) - c.Logf("Dumped: %#v", string(data)) - err = bson.Unmarshal(data, zero) - c.Assert(err, IsNil) - c.Logf("Loaded: %#v", zero) - c.Assert(zero, DeepEquals, load) -} - -func (s *S) TestTwoWayCrossPairs(c *C) { - for _, item := range twoWayCrossItems { - testCrossPair(c, item.obj1, item.obj2) - testCrossPair(c, item.obj2, item.obj1) - } -} - -func (s *S) TestOneWayCrossPairs(c *C) { - for _, item := range oneWayCrossItems { - testCrossPair(c, item.obj1, item.obj2) - } -} - -// -------------------------------------------------------------------------- -// ObjectId hex representation test. - -func (s *S) TestObjectIdHex(c *C) { - id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") - c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`) - c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9") -} - -func (s *S) TestIsObjectIdHex(c *C) { - test := []struct { - id string - valid bool - }{ - {"4d88e15b60f486e428412dc9", true}, - {"4d88e15b60f486e428412dc", false}, - {"4d88e15b60f486e428412dc9e", false}, - {"4d88e15b60f486e428412dcx", false}, - } - for _, t := range test { - c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid) - } -} - -// -------------------------------------------------------------------------- -// ObjectId parts extraction tests. 
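Per the accessors exercised below, the 12-byte ObjectId packs a 4-byte big-endian Unix timestamp, a 3-byte machine id, a 2-byte process id, and a 3-byte counter. A sketch decoding the first fixture from the table that follows (gopkg.in/mgo.v2/bson import path assumed):

    package main

    import (
    	"fmt"

    	"gopkg.in/mgo.v2/bson" // import path assumed for illustration
    )

    func main() {
    	id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")
    	// Layout: 4d88e15b | 60f486 | e428 | 412dc9
    	fmt.Println(id.Time().Unix())    // 1300816219 (0x4d88e15b)
    	fmt.Printf("%x\n", id.Machine()) // 60f486
    	fmt.Printf("%x\n", id.Pid())     // e428
    	fmt.Println(id.Counter())        // 4271561 (0x412dc9)
    }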
- -type objectIdParts struct { - id bson.ObjectId - timestamp int64 - machine []byte - pid uint16 - counter int32 -} - -var objectIds = []objectIdParts{ - objectIdParts{ - bson.ObjectIdHex("4d88e15b60f486e428412dc9"), - 1300816219, - []byte{0x60, 0xf4, 0x86}, - 0xe428, - 4271561, - }, - objectIdParts{ - bson.ObjectIdHex("000000000000000000000000"), - 0, - []byte{0x00, 0x00, 0x00}, - 0x0000, - 0, - }, - objectIdParts{ - bson.ObjectIdHex("00000000aabbccddee000001"), - 0, - []byte{0xaa, 0xbb, 0xcc}, - 0xddee, - 1, - }, -} - -func (s *S) TestObjectIdPartsExtraction(c *C) { - for i, v := range objectIds { - t := time.Unix(v.timestamp, 0) - c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i)) - c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i)) - c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i)) - c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i)) - } -} - -func (s *S) TestNow(c *C) { - before := time.Now() - time.Sleep(1e6) - now := bson.Now() - time.Sleep(1e6) - after := time.Now() - c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after)) -} - -// -------------------------------------------------------------------------- -// ObjectId generation tests. - -func (s *S) TestNewObjectId(c *C) { - // Generate 10 ids - ids := make([]bson.ObjectId, 10) - for i := 0; i < 10; i++ { - ids[i] = bson.NewObjectId() - } - for i := 1; i < 10; i++ { - prevId := ids[i-1] - id := ids[i] - // Test for uniqueness among all other 9 generated ids - for j, tid := range ids { - if j != i { - c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique")) - } - } - // Check that timestamp was incremented and is within 30 seconds of the previous one - secs := id.Time().Sub(prevId.Time()).Seconds() - c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId")) - // Check that machine ids are the same - c.Assert(id.Machine(), DeepEquals, prevId.Machine()) - // Check that pids are the same - c.Assert(id.Pid(), Equals, prevId.Pid()) - // Test for proper increment - delta := int(id.Counter() - prevId.Counter()) - c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId")) - } -} - -func (s *S) TestNewObjectIdWithTime(c *C) { - t := time.Unix(12345678, 0) - id := bson.NewObjectIdWithTime(t) - c.Assert(id.Time(), Equals, t) - c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00}) - c.Assert(int(id.Pid()), Equals, 0) - c.Assert(int(id.Counter()), Equals, 0) -} - -// -------------------------------------------------------------------------- -// ObjectId JSON marshalling. 
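The tests below pin the JSON form of an ObjectId to its plain hex string, in both directions. A minimal round-trip sketch (same bson import assumption):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"gopkg.in/mgo.v2/bson" // import path assumed for illustration
    )

    func main() {
    	id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")
    	out, err := json.Marshal(bson.M{"id": id})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out)) // {"id":"4d88e15b60f486e428412dc9"}

    	var back struct{ Id bson.ObjectId }
    	if err := json.Unmarshal(out, &back); err != nil {
    		panic(err)
    	}
    	fmt.Println(back.Id == id) // true
    }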
- -type jsonType struct { - Id *bson.ObjectId -} - -func (s *S) TestObjectIdJSONMarshaling(c *C) { - id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") - v := jsonType{Id: &id} - data, err := json.Marshal(&v) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, `{"Id":"4d88e15b60f486e428412dc9"}`) -} - -func (s *S) TestObjectIdJSONUnmarshaling(c *C) { - data := []byte(`{"Id":"4d88e15b60f486e428412dc9"}`) - v := jsonType{} - err := json.Unmarshal(data, &v) - c.Assert(err, IsNil) - c.Assert(*v.Id, Equals, bson.ObjectIdHex("4d88e15b60f486e428412dc9")) -} - -func (s *S) TestObjectIdJSONUnmarshalingError(c *C) { - v := jsonType{} - err := json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dc9A"}`), &v) - c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`) - err = json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dcZ"}`), &v) - c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`) -} - -// -------------------------------------------------------------------------- -// Some simple benchmarks. - -type BenchT struct { - A, B, C, D, E, F string -} - -func BenchmarkUnmarhsalStruct(b *testing.B) { - v := BenchT{A: "A", D: "D", E: "E"} - data, err := bson.Marshal(&v) - if err != nil { - panic(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - err = bson.Unmarshal(data, &v) - } - if err != nil { - panic(err) - } -} - -func BenchmarkUnmarhsalMap(b *testing.B) { - m := bson.M{"a": "a", "d": "d", "e": "e"} - data, err := bson.Marshal(&m) - if err != nil { - panic(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - err = bson.Unmarshal(data, &m) - } - if err != nil { - panic(err) - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_test.go b/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_test.go deleted file mode 100644 index 3575fc2..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package filelock - -import ( - "bytes" - "flag" - "io/ioutil" - "os" - "os/exec" - "testing" -) - -var lockFilename = flag.String("lockfile", "", "File to lock. Non-empty value pimples child process.") - -func spawn(prog, filename string) ([]byte, error) { - return exec.Command(prog, "-lockfile", filename, "-test.v", - "-test.run=TestLock$").CombinedOutput() -} - -// TestLock locks a file, spawns a second process that attempts to grab the -// lock to verify it fails. -// Then it closes the lock, and spawns a third copy to verify it can be -// relocked. -func TestLock(t *testing.T) { - child := *lockFilename != "" - var filename string - if child { - filename = *lockFilename - } else { - f, err := ioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - filename = f.Name() - defer os.Remove(filename) - } - - // Avoid truncating an existing, non-empty file. 
- fi, err := os.Stat(filename) - if err == nil && fi.Size() != 0 { - t.Fatal("The file %s is not empty", filename) - } - - t.Logf("Locking %s\n", filename) - lock, err := Lock(filename) - if err != nil { - t.Fatalf("Could not lock %s: %v", filename, err) - } - - if !child { - t.Logf("Spawning child, should fail to grab lock.") - out, err := spawn(os.Args[0], filename) - if err == nil { - t.Fatalf("Attempt to grab open lock should have failed.\n%s", out) - } - if !bytes.Contains(out, []byte("Could not lock")) { - t.Fatalf("Child failed with unexpected output: %s\n", out) - } - t.Logf("Child failed to grab lock as expected.") - } - - t.Logf("Unlocking %s", filename) - if err := lock.Close(); err != nil { - t.Fatalf("Could not unlock %s: %v", filename, err) - } - - if !child { - t.Logf("Spawning child, should successfully grab lock.") - if out, err := spawn(os.Args[0], filename); err != nil { - t.Fatalf("Attempt to re-open lock should have succeeded: %v\n%s", - err, out) - } - t.Logf("Child grabbed lock.") - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/hack/hack_test.go b/Godeps/_workspace/src/github.com/siddontang/go/hack/hack_test.go deleted file mode 100644 index 7b11b0b..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/hack/hack_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package hack - -import ( - "bytes" - "testing" -) - -func TestString(t *testing.T) { - b := []byte("hello world") - a := String(b) - - if a != "hello world" { - t.Fatal(a) - } - - b[0] = 'a' - - if a != "aello world" { - t.Fatal(a) - } - - b = append(b, "abc"...) - if a != "aello world" { - t.Fatal(a) - } -} - -func TestByte(t *testing.T) { - a := "hello world" - - b := Slice(a) - - if !bytes.Equal(b, []byte("hello world")) { - t.Fatal(string(b)) - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/ioutil2/sectionwriter_test.go b/Godeps/_workspace/src/github.com/siddontang/go/ioutil2/sectionwriter_test.go deleted file mode 100644 index 35bf6d7..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/ioutil2/sectionwriter_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package ioutil2 - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestSectionWriter(t *testing.T) { - f, err := ioutil.TempFile(".", "test_") - if err != nil { - t.Fatal(err) - } - - defer func() { - n := f.Name() - f.Close() - os.Remove(n) - }() - - f.Truncate(3) - - rw := NewSectionWriter(f, 0, 1) - - _, err = rw.Write([]byte{'1'}) - if err != nil { - t.Fatal(err) - } - - _, err = rw.Write([]byte{'1'}) - if err == nil { - t.Fatal("must err") - } - - rw = NewSectionWriter(f, 1, 2) - - _, err = rw.Write([]byte{'2', '3', '4'}) - if err == nil { - t.Fatal("must err") - } - - _, err = rw.Write([]byte{'2', '3'}) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 3) - _, err = f.ReadAt(buf, 0) - if err != nil { - t.Fatal(err) - } - - if string(buf) != "123" { - t.Fatal(string(buf)) - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/log/log_test.go b/Godeps/_workspace/src/github.com/siddontang/go/log/log_test.go deleted file mode 100644 index 2e29b31..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/log/log_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package log - -import ( - "os" - "testing" -) - -func TestStdStreamLog(t *testing.T) { - h, _ := NewStreamHandler(os.Stdout) - s := NewDefault(h) - s.Info("hello world") - - s.Close() - - s.Info("can not log") - - Info("hello world") - - SetHandler(os.Stderr) - - Infof("%s %d", "Hello", 123) - - SetLevel(LevelError) - - Infof("%s %d", 
"Hello", 123) - Fatalf("%s %d", "Hello", 123) -} - -func TestRotatingFileLog(t *testing.T) { - path := "./test_log" - os.RemoveAll(path) - - os.Mkdir(path, 0777) - fileName := path + "/test" - - h, err := NewRotatingFileHandler(fileName, 10, 2) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 10) - - h.Write(buf) - - h.Write(buf) - - if _, err := os.Stat(fileName + ".1"); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(fileName + ".2"); err == nil { - t.Fatal(err) - } - - h.Write(buf) - if _, err := os.Stat(fileName + ".2"); err != nil { - t.Fatal(err) - } - - h.Close() - - os.RemoveAll(path) -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/num/num_test.go b/Godeps/_workspace/src/github.com/siddontang/go/num/num_test.go deleted file mode 100644 index 9c64481..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/num/num_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package num - -import ( - "testing" -) - -func testMin(t *testing.T, v1 interface{}, v2 interface{}, v interface{}) { - var c interface{} - switch i1 := v1.(type) { - case int: - c = MinInt(i1, v2.(int)) - case int8: - c = MinInt8(i1, v2.(int8)) - case int16: - c = MinInt16(i1, v2.(int16)) - case int32: - c = MinInt32(i1, v2.(int32)) - case int64: - c = MinInt64(i1, v2.(int64)) - case uint: - c = MinUint(i1, v2.(uint)) - case uint8: - c = MinUint8(i1, v2.(uint8)) - case uint16: - c = MinUint16(i1, v2.(uint16)) - case uint32: - c = MinUint32(i1, v2.(uint32)) - case uint64: - c = MinUint64(i1, v2.(uint64)) - default: - t.Fatalf("invalid type %T", t) - } - - if c != v { - t.Fatalf("invalid %v(%T) != %v(%T)", c, c, v, v) - } -} - -func TestMin(t *testing.T) { - testMin(t, int(1), int(2), int(1)) - testMin(t, int(1), int(1), int(1)) - - testMin(t, int8(1), int8(2), int8(1)) - testMin(t, int8(1), int8(1), int8(1)) - - testMin(t, int16(1), int16(2), int16(1)) - testMin(t, int16(1), int16(1), int16(1)) - - testMin(t, int32(1), int32(2), int32(1)) - testMin(t, int32(1), int32(1), int32(1)) - - testMin(t, int64(1), int64(2), int64(1)) - testMin(t, int64(1), int64(1), int64(1)) - - testMin(t, uint(1), uint(2), uint(1)) - testMin(t, uint(1), uint(1), uint(1)) - - testMin(t, uint8(1), uint8(2), uint8(1)) - testMin(t, uint8(1), uint8(1), uint8(1)) - - testMin(t, uint16(1), uint16(2), uint16(1)) - testMin(t, uint16(1), uint16(1), uint16(1)) - - testMin(t, uint32(1), uint32(2), uint32(1)) - testMin(t, uint32(1), uint32(1), uint32(1)) - - testMin(t, uint64(1), uint64(2), uint64(1)) - testMin(t, uint64(1), uint64(1), uint64(1)) -} - -func testMax(t *testing.T, v1 interface{}, v2 interface{}, v interface{}) { - var c interface{} - switch i1 := v1.(type) { - case int: - c = MaxInt(i1, v2.(int)) - case int8: - c = MaxInt8(i1, v2.(int8)) - case int16: - c = MaxInt16(i1, v2.(int16)) - case int32: - c = MaxInt32(i1, v2.(int32)) - case int64: - c = MaxInt64(i1, v2.(int64)) - case uint: - c = MaxUint(i1, v2.(uint)) - case uint8: - c = MaxUint8(i1, v2.(uint8)) - case uint16: - c = MaxUint16(i1, v2.(uint16)) - case uint32: - c = MaxUint32(i1, v2.(uint32)) - case uint64: - c = MaxUint64(i1, v2.(uint64)) - default: - t.Fatalf("invalid type %T", t) - } - - if c != v { - t.Fatalf("invalid %v(%T) != %v(%T)", c, c, v, v) - } -} - -func TestMax(t *testing.T) { - testMax(t, int(1), int(2), int(2)) - testMax(t, int(1), int(1), int(1)) - - testMax(t, int8(1), int8(2), int8(2)) - testMax(t, int8(1), int8(1), int8(1)) - - testMax(t, int16(1), int16(2), int16(2)) - testMax(t, int16(1), int16(1), int16(1)) - - testMax(t, int32(1), 
int32(2), int32(2)) - testMax(t, int32(1), int32(1), int32(1)) - - testMax(t, int64(1), int64(2), int64(2)) - testMax(t, int64(1), int64(1), int64(1)) - - testMax(t, uint(1), uint(2), uint(2)) - testMax(t, uint(1), uint(1), uint(1)) - - testMax(t, uint8(1), uint8(2), uint8(2)) - testMax(t, uint8(1), uint8(1), uint8(1)) - - testMax(t, uint16(1), uint16(2), uint16(2)) - testMax(t, uint16(1), uint16(1), uint16(1)) - - testMax(t, uint32(1), uint32(2), uint32(2)) - testMax(t, uint32(1), uint32(1), uint32(1)) - - testMax(t, uint64(1), uint64(2), uint64(2)) - testMax(t, uint64(1), uint64(1), uint64(1)) -} - -func TestBytes(t *testing.T) { - if BytesToUint64(Uint64ToBytes(1)) != 1 { - t.Fatal("convert fail") - } - - if BytesToUint32(Uint32ToBytes(1)) != 1 { - t.Fatal("convert fail") - } - - if BytesToUint16(Uint16ToBytes(1)) != 1 { - t.Fatal("convert fail") - } - - if BytesToInt64(Int64ToBytes(-1)) != -1 { - t.Fatal("convert fail") - } - - if BytesToInt32(Int32ToBytes(-1)) != -1 { - t.Fatal("convert fail") - } - - if BytesToInt16(Int16ToBytes(-1)) != -1 { - t.Fatal("convert fail") - } -} - -func TestStr(t *testing.T) { - if v, err := ParseUint64(FormatUint64(1)); err != nil { - t.Fatal(err) - } else if v != 1 { - t.Fatal(v) - } - - if v, err := ParseUint32(FormatUint32(1)); err != nil { - t.Fatal(err) - } else if v != 1 { - t.Fatal(v) - } - - if v, err := ParseUint16(FormatUint16(1)); err != nil { - t.Fatal(err) - } else if v != 1 { - t.Fatal(v) - } - - if v, err := ParseUint8(FormatUint8(1)); err != nil { - t.Fatal(err) - } else if v != 1 { - t.Fatal(v) - } - - if v, err := ParseInt64(FormatInt64(-1)); err != nil { - t.Fatal(err) - } else if v != -1 { - t.Fatal(v) - } - - if v, err := ParseInt32(FormatInt32(-1)); err != nil { - t.Fatal(err) - } else if v != -1 { - t.Fatal(v) - } - - if v, err := ParseInt16(FormatInt16(-1)); err != nil { - t.Fatal(err) - } else if v != -1 { - t.Fatal(v) - } - - if v, err := ParseInt8(FormatInt8(-1)); err != nil { - t.Fatal(err) - } else if v != -1 { - t.Fatal(v) - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/siddontang/go/snappy/snappy_test.go deleted file mode 100644 index 7ba8392..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/snappy/snappy_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - -func roundtrip(b, ebuf, dbuf []byte) error { - e, err := Encode(ebuf, b) - if err != nil { - return fmt.Errorf("encoding error: %v", err) - } - d, err := Decode(dbuf, e) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rand.Seed(27354294) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i, _ := range b { - b[i] = uint8(rand.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i, _ := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded, err := Encode(nil, src) - if err != nil { - b.Fatal(err) - } - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b *testing.B, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Fatalf("failed reading %s: %s", filename, err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. -func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. 
- data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "house.jpg"}, - {"pdf", "mapreduce-osdi-1.pdf"}, - {"html4", "html_x_4"}, - {"cp", "cp.html"}, - {"c", "fields.c"}, - {"lsp", "grammar.lsp"}, - {"xls", "kennedy.xls"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"bin", "ptt5"}, - {"sum", "sum"}, - {"man", "xargs.1"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" - -func downloadTestdata(basename string) (errRet error) { - filename := filepath.Join("testdata", basename) - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - resp, err := http.Get(baseURL + basename) - if err != nil { - return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) - } - defer resp.Body.Close() - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to write %s: %s", filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - filename := filepath.Join("testdata", testFiles[n].filename) - if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { - if !*download { - b.Fatal("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { - b.Fatalf("failed to create testdata: %s", err) - } - for _, tf := range testFiles { - if err := downloadTestdata(tf.filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - } - } - data := readFile(b, filename) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) } -func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) } -func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) } -func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) } -func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) } -func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } -func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) } -func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) } -func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) } -func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) } -func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) } -func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) } diff --git a/Godeps/_workspace/src/github.com/siddontang/go/sync2/atomic_test.go b/Godeps/_workspace/src/github.com/siddontang/go/sync2/atomic_test.go deleted file mode 100644 index 040397f..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/sync2/atomic_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2013, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package sync2 - -import ( - "testing" -) - -func TestAtomicString(t *testing.T) { - var s AtomicString - if s.Get() != "" { - t.Errorf("want empty, got %s", s.Get()) - } - s.Set("a") - if s.Get() != "a" { - t.Errorf("want a, got %s", s.Get()) - } - if s.CompareAndSwap("b", "c") { - t.Errorf("want false, got true") - } - if s.Get() != "a" { - t.Errorf("want a, got %s", s.Get()) - } - if !s.CompareAndSwap("a", "c") { - t.Errorf("want true, got false") - } - if s.Get() != "c" { - t.Errorf("want c, got %s", s.Get()) - } -} - -func TestAtomicBool(t *testing.T) { - var b AtomicBool - if b.Get() != false { - t.Fatal("must false") - } - - b.Set(true) - - if b.Get() != true { - t.Fatal("must true") - } - - b.Set(false) - - if b.Get() != false { - t.Fatal("must false") - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/go/sync2/semaphore_test.go b/Godeps/_workspace/src/github.com/siddontang/go/sync2/semaphore_test.go deleted file mode 100644 index 8c48694..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/go/sync2/semaphore_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sync2 - -import ( - "testing" - "time" -) - -func TestSemaNoTimeout(t *testing.T) { - s := NewSemaphore(1) - s.Acquire() - released := false - go func() { - time.Sleep(10 * time.Millisecond) - released = true - s.Release() - }() - s.Acquire() - if !released { - t.Errorf("want true, got false") - } -} - -func TestSemaTimeout(t *testing.T) { - s := NewSemaphore(1) - s.Acquire() - go func() { - time.Sleep(10 * time.Millisecond) - s.Release() - }() - if ok := s.AcquireTimeout(5 * time.Millisecond); ok { - t.Errorf("want false, got true") - } - time.Sleep(10 * time.Millisecond) - if ok := s.AcquireTimeout(5 * time.Millisecond); !ok { - t.Errorf("want true, got false") - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/goredis_test.go b/Godeps/_workspace/src/github.com/siddontang/goredis/goredis_test.go deleted file mode 100644 index f96efcb..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/goredis/goredis_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package goredis - -import ( - "github.com/alicebob/miniredis" - "testing" -) - -func Test(t *testing.T) { - s, err := miniredis.Run() - if err != nil { - t.Fatal(err) - } - defer s.Close() - - s.RequireAuth("123456") - - addr := s.Addr() - - c := NewClient(addr, "123456") - defer c.Close() - - conn, err := c.Get() - if err != nil { - t.Fatal(err) - } - defer conn.Close() - - if pong, err := String(conn.Do("PING")); err != nil { - t.Fatal(err) - } else if pong != "PONG" { - t.Fatal(pong) - } - - if pong, err := String(conn.Do("PING")); err != nil { - t.Fatal(err) - } else if pong != "PONG" { - t.Fatal(pong) - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/resp_test.go b/Godeps/_workspace/src/github.com/siddontang/goredis/resp_test.go deleted file mode 100644 index c271797..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/goredis/resp_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package goredis - -import ( - "bufio" - "bytes" - "reflect" - "testing" -) - -func TestResp(t *testing.T) { - var buf bytes.Buffer - - reader := NewRespReader(bufio.NewReader(&buf)) - writer := NewRespWriter(bufio.NewWriter(&buf)) - - if err := writer.WriteCommand("SELECT", 1); err != nil { - t.Fatal(err) - } else { - if reqs, err := reader.ParseRequest(); err != nil { - t.Fatal(err) - } 
else if len(reqs) != 2 { - t.Fatal(len(reqs)) - } else if string(reqs[0]) != "SELECT" { - t.Fatal(string(reqs[0])) - } else if string(reqs[1]) != "1" { - t.Fatal(string(reqs[1])) - } - } - - if err := writer.FlushInteger(10); err != nil { - t.Fatal(err) - } else { - if n, err := Int64(reader.Parse()); err != nil { - t.Fatal(err) - } else if n != 10 { - t.Fatal(n) - } - } - - if err := writer.FlushString("abc"); err != nil { - t.Fatal(err) - } else { - if s, err := String(reader.Parse()); err != nil { - t.Fatal(err) - } else if s != "abc" { - t.Fatal(s) - } - } - - if err := writer.FlushBulk([]byte("abc")); err != nil { - t.Fatal(err) - } else { - if s, err := String(reader.Parse()); err != nil { - t.Fatal(err) - } else if s != "abc" { - t.Fatal(s) - } - } - - ay := []interface{}{[]byte("SET"), []byte("a"), []byte("1")} - if err := writer.FlushArray(ay); err != nil { - t.Fatal(err) - } else { - if oy, err := reader.Parse(); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(oy, ay) { - t.Fatalf("%#v", oy) - } - } - - e := Error("hello world") - if err := writer.FlushError(e); err != nil { - t.Fatal(err) - } else { - if ee, err := reader.Parse(); err != nil { - t.Fatal("must error") - } else if !reflect.DeepEqual(e, ee) { - t.Fatal(ee) - } - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/loader_test.go b/Godeps/_workspace/src/github.com/siddontang/rdb/loader_test.go deleted file mode 100644 index db8e2ee..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/rdb/loader_test.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2014 Wandoujia Inc. All Rights Reserved. -// Licensed under the MIT (MIT-LICENSE.txt) license. - -package rdb - -import ( - "bytes" - "encoding/hex" - "fmt" - "math" - "strconv" - "strings" - "testing" -) - -func AssertNoError(t *testing.T, err error) { - if err == nil { - return - } - - t.Fatal(err) -} - -func Assert(t *testing.T, b bool) { - if b { - return - } - t.Fatal("assertion failed") -} - -func DecodeHexRdb(t *testing.T, s string, n int) map[string]*Entry { - p, err := hex.DecodeString(strings.NewReplacer("\t", "", "\r", "", "\n", "", " ", "").Replace(s)) - AssertNoError(t, err) - r := bytes.NewReader(p) - l := NewLoader(r) - AssertNoError(t, l.LoadHeader()) - entries := make(map[string]*Entry) - var i int = 0 - for { - e, err := l.LoadEntry() - AssertNoError(t, err) - if e == nil { - break - } - Assert(t, e.DB == 0) - entries[string(e.Key)] = e - i++ - } - AssertNoError(t, l.LoadChecksum()) - Assert(t, r.Len() == 0) - Assert(t, len(entries) == i && i == n) - return entries -} - -func getobj(t *testing.T, entries map[string]*Entry, key string) (*Entry, interface{}) { - e := entries[key] - Assert(t, e != nil) - val, err := DecodeDump(e.ValDump) - AssertNoError(t, err) - return e, val -} - -/* -#!/bin/bash -./redis-cli flushall -for i in 1 255 256 65535 65536 2147483647 2147483648 4294967295 4294967296 -2147483648; do - ./redis-cli set string_${i} ${i} -done -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadIntString(t *testing.T) { - s := ` - 524544495330303036fe00000a737472696e675f323535c1ff00000873747269 - 6e675f31c0010011737472696e675f343239343936373239360a343239343936 - 373239360011737472696e675f343239343936373239350a3432393439363732 - 39350012737472696e675f2d32313437343833363438c200000080000c737472 - 696e675f3635353335c2ffff00000011737472696e675f323134373438333634 - 380a32313437343833363438000c737472696e675f3635353336c20000010000 - 0a737472696e675f323536c100010011737472696e675f323134373438333634 - 
37c2ffffff7fffe49d9f131fb5c3b5 - ` - values := []int{1, 255, 256, 65535, 65536, 2147483647, 2147483648, 4294967295, 4294967296, -2147483648} - entries := DecodeHexRdb(t, s, len(values)) - for _, value := range values { - key := fmt.Sprintf("string_%d", value) - _, obj := getobj(t, entries, key) - val := obj.(String) - Assert(t, bytes.Equal([]byte(val), []byte(strconv.Itoa(value)))) - } -} - -/* -#!/bin/bash -./redis-cli flushall -./redis-cli set string_ttls string_ttls -./redis-cli expireat string_ttls 1500000000 -./redis-cli set string_ttlms string_ttlms -./redis-cli pexpireat string_ttlms 1500000000000 -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadStringTTL(t *testing.T) { - s := ` - 524544495330303036fe00fc0098f73e5d010000000c737472696e675f74746c - 6d730c737472696e675f74746c6d73fc0098f73e5d010000000b737472696e67 - 5f74746c730b737472696e675f74746c73ffd15acd935a3fe949 - ` - expireat := uint64(1500000000000) - entries := DecodeHexRdb(t, s, 2) - keys := []string{"string_ttls", "string_ttlms"} - for _, key := range keys { - e, obj := getobj(t, entries, key) - val := obj.(String) - Assert(t, bytes.Equal([]byte(val), []byte(key))) - Assert(t, e.ExpireAt == expireat) - } -} - -/* -#!/bin/bash -s="01" -for ((i=0;i<15;i++)); do - s=$s$s -done -./redis-cli flushall -./redis-cli set string_long $s -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadLongString(t *testing.T) { - s := ` - 524544495330303036fe00000b737472696e675f6c6f6e67c342f28000010000 - 02303130e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff - 01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0 - ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01e0ff01 - e0ff01e0ff01e0ff01e0ff01e03201013031ffdfdb02bd6d5da5e6 - ` - entries := DecodeHexRdb(t, s, 1) - _, obj := getobj(t, entries, "string_long") - val := []byte(obj.(String)) - for i := 0; i < (1 << 15); i++ { - var c uint8 = '0' - if i%2 != 0 { - c = '1' - } - Assert(t, val[i] == c) - } -} - -/* -#!/bin/bash -./redis-cli flushall -for ((i=0;i<256;i++)); do - ./redis-cli rpush list_lzf 0 - ./redis-cli rpush list_lzf 1 -done -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadListZipmap(t *testing.T) { - s := ` - 
524544495330303036fe000a086c6973745f6c7a66c31f440b040b0400000820 - 0306000200f102f202e0ff03e1ff07e1ff07e1d90701f2ffff6a1c2d51c02301 - 16 - ` - entries := DecodeHexRdb(t, s, 1) - _, obj := getobj(t, entries, "list_lzf") - val := obj.(List) - Assert(t, len(val) == 512) - for i := 0; i < 256; i++ { - var s string = "0" - if i%2 != 0 { - s = "1" - } - Assert(t, string(val[i]) == s) - } -} - -/* -#!/bin/bash -./redis-cli flushall -for ((i=0;i<32;i++)); do - ./redis-cli rpush list ${i} -done -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadList(t *testing.T) { - s := ` - 524544495330303036fe0001046c69737420c000c001c002c003c004c005c006 - c007c008c009c00ac00bc00cc00dc00ec00fc010c011c012c013c014c015c016 - c017c018c019c01ac01bc01cc01dc01ec01fff756ea1fa90adefe3 - ` - entries := DecodeHexRdb(t, s, 1) - _, obj := getobj(t, entries, "list") - val := obj.(List) - Assert(t, len(val) == 32) - for i := 0; i < 32; i++ { - Assert(t, string(val[i]) == strconv.Itoa(i)) - } -} - -/* -#!/bin/bash -./redis-cli flushall -for ((i=0;i<16;i++)); do - ./redis-cli sadd set1 ${i} -done -for ((i=0;i<32;i++)); do - ./redis-cli sadd set2 ${i} -done -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadSetAndSetIntset(t *testing.T) { - s := ` - 524544495330303036fe0002047365743220c016c00dc01bc012c01ac004c014 - c002c017c01dc01cc013c019c01ec008c006c000c001c007c00fc009c01fc00e - c003c00ac015c010c00bc018c011c00cc0050b04736574312802000000100000 - 0000000100020003000400050006000700080009000a000b000c000d000e000f - 00ff3a0a9697324d19c3 - ` - entries := DecodeHexRdb(t, s, 2) - - _, obj1 := getobj(t, entries, "set1") - val1 := obj1.(Set) - set1 := make(map[string]bool) - for _, mem := range val1 { - set1[string(mem)] = true - } - Assert(t, len(set1) == 16) - Assert(t, len(set1) == len(val1)) - for i := 0; i < 16; i++ { - _, ok := set1[strconv.Itoa(i)] - Assert(t, ok) - } - - _, obj2 := getobj(t, entries, "set2") - val2 := obj2.(Set) - set2 := make(map[string]bool) - for _, mem := range val2 { - set2[string(mem)] = true - } - Assert(t, len(set2) == 32) - Assert(t, len(set2) == len(val2)) - for i := 0; i < 32; i++ { - _, ok := set2[strconv.Itoa(i)] - Assert(t, ok) - } -} - -/* -#!/bin/bash -./redis-cli flushall -for ((i=0;i<16;i++)); do - ./redis-cli hset hash1 ${i} -done -for ((i=-16;i<16;i++)); do - ./redis-cli hset hash2 ${i} -done -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadHashAndHashZiplist(t *testing.T) { - s := ` - 524544495330303036fe000405686173683220c00dc00dc0fcc0fcc0ffc0ffc0 - 04c004c002c002c0fbc0fbc0f0c0f0c0f9c0f9c008c008c0fac0fac006c006c0 - 00c000c001c001c0fec0fec007c007c0f6c0f6c00fc00fc009c009c0f7c0f7c0 - fdc0fdc0f1c0f1c0f2c0f2c0f3c0f3c00ec00ec003c003c00ac00ac00bc00bc0 - f8c0f8c00cc00cc0f5c0f5c0f4c0f4c005c0050d056861736831405151000000 - 4d000000200000f102f102f202f202f302f302f402f402f502f502f602f602f7 - 02f702f802f802f902f902fa02fa02fb02fb02fc02fc02fd02fd02fe0d03fe0d - 03fe0e03fe0e03fe0f03fe0fffffa423d3036c15e534 - ` - entries := DecodeHexRdb(t, s, 2) - - _, obj1 := getobj(t, entries, "hash1") - val1 := obj1.(Hash) - hash1 := make(map[string]string) - for _, ent := range val1 { - hash1[string(ent.Field)] = string(ent.Value) - } - Assert(t, len(hash1) == 16) - Assert(t, len(hash1) == len(val1)) - for i := 0; i < 16; i++ { - s := strconv.Itoa(i) - Assert(t, hash1[s] == s) - } - - _, obj2 := getobj(t, entries, "hash2") - val2 := obj2.(Hash) - hash2 := make(map[string]string) - for _, ent := range val2 { - hash2[string(ent.Field)] = string(ent.Value) - } - Assert(t, len(hash2) == 32) - 
Assert(t, len(hash2) == len(val2)) - for i := -16; i < 16; i++ { - s := strconv.Itoa(i) - Assert(t, hash2[s] == s) - } -} - -/* -#!/bin/bash -./redis-cli flushall -for ((i=0;i<16;i++)); do - ./redis-cli zadd zset1 ${i} ${i} -done -for ((i=0;i<32;i++)); do - ./redis-cli zadd zset2 -${i} ${i} -done -./redis-cli save && xxd -p -c 32 dump.rdb -*/ -func TestLoadZSetAndZSetZiplist(t *testing.T) { - s := ` - 524544495330303036fe0003057a7365743220c016032d3232c00d032d3133c0 - 1b032d3237c012032d3138c01a032d3236c004022d34c014032d3230c002022d - 32c017032d3233c01d032d3239c01c032d3238c013032d3139c019032d3235c0 - 1e032d3330c008022d38c006022d36c000022d30c001022d31c007022d37c009 - 022d39c00f032d3135c01f032d3331c00e032d3134c003022d33c00a032d3130 - c015032d3231c010032d3136c00b032d3131c018032d3234c011032d3137c00c - 032d3132c005022d350c057a736574314051510000004d000000200000f102f1 - 02f202f202f302f302f402f402f502f502f602f602f702f702f802f802f902f9 - 02fa02fa02fb02fb02fc02fc02fd02fd02fe0d03fe0d03fe0e03fe0e03fe0f03 - fe0fffff2addedbf4f5a8f93 - ` - entries := DecodeHexRdb(t, s, 2) - - _, obj1 := getobj(t, entries, "zset1") - val1 := obj1.(ZSet) - zset1 := make(map[string]float64) - for _, ent := range val1 { - zset1[string(ent.Member)] = ent.Score - } - Assert(t, len(zset1) == 16) - Assert(t, len(zset1) == len(val1)) - for i := 0; i < 16; i++ { - s := strconv.Itoa(i) - score, ok := zset1[s] - Assert(t, ok) - Assert(t, math.Abs(score-float64(i)) < 1e-10) - } - - _, obj2 := getobj(t, entries, "zset2") - val2 := obj2.(ZSet) - zset2 := make(map[string]float64) - for _, ent := range val2 { - zset2[string(ent.Member)] = ent.Score - } - Assert(t, len(zset2) == 32) - Assert(t, len(zset2) == len(val2)) - for i := 0; i < 32; i++ { - s := strconv.Itoa(i) - score, ok := zset2[s] - Assert(t, ok) - Assert(t, math.Abs(score+float64(i)) < 1e-10) - } -} diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/rdb_test.go b/Godeps/_workspace/src/github.com/siddontang/rdb/rdb_test.go deleted file mode 100644 index 88095d2..0000000 --- a/Godeps/_workspace/src/github.com/siddontang/rdb/rdb_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package rdb - -import ( - "reflect" - "testing" -) - -func TestCodec(t *testing.T) { - testCodec(String("abc"), t) -} - -func testCodec(obj interface{}, t *testing.T) { - b, err := Dump(obj) - if err != nil { - t.Fatal(err) - } - - if o, err := DecodeDump(b); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(obj, o) { - t.Fatal("must equal") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index 7fc842f..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
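The batch_test.go listing that follows exercises goleveldb's write batch: a Batch buffers Put and Delete records together with a starting sequence number, and DB.Write applies the whole batch atomically. A minimal sketch of that public API (the /tmp path is a placeholder, not taken from the patch):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// OpenFile creates the database directory if it does not exist.
	db, err := leveldb.OpenFile("/tmp/example-db", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Records accumulate in memory; nothing hits the DB until Write.
	batch := new(leveldb.Batch)
	batch.Put([]byte("key1"), []byte("value1"))
	batch.Put([]byte("key2"), []byte("value2"))
	batch.Delete([]byte("key1"))

	// The whole batch is applied atomically; the tests below check the
	// same record stream at the encode/decode level via Replay.
	if err := db.Write(batch, nil); err != nil {
		panic(err)
	}
	fmt.Println("records in batch:", batch.Len())
}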
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type tbRec struct { - kt kType - key, value []byte -} - -type testBatch struct { - rec []*tbRec -} - -func (p *testBatch) Put(key, value []byte) { - p.rec = append(p.rec, &tbRec{ktVal, key, value}) -} - -func (p *testBatch) Delete(key []byte) { - p.rec = append(p.rec, &tbRec{ktDel, key, nil}) -} - -func compareBatch(t *testing.T, b1, b2 *Batch) { - if b1.seq != b2.seq { - t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) - } - if b1.Len() != b2.Len() { - t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len()) - } - p1, p2 := new(testBatch), new(testBatch) - err := b1.Replay(p1) - if err != nil { - t.Fatal("error when replaying batch 1: ", err) - } - err = b2.Replay(p2) - if err != nil { - t.Fatal("error when replaying batch 2: ", err) - } - for i := range p1.rec { - r1, r2 := p1.rec[i], p2.rec[i] - if r1.kt != r2.kt { - t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt) - } - if !bytes.Equal(r1.key, r2.key) { - t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) - } - if r1.kt == ktVal { - if !bytes.Equal(r1.value, r2.value) { - t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) - } - } - } -} - -func TestBatch_EncodeDecode(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("k"), []byte("")) - b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) - b1.Delete([]byte("key10000")) - b1.Delete([]byte("k")) - buf := b1.encode() - b2 := new(Batch) - err := b2.decode(0, buf) - if err != nil { - t.Error("error when decoding batch: ", err) - } - compareBatch(t, b1, b2) -} - -func TestBatch_Append(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("foo"), []byte("foovalue")) - b1.Put([]byte("bar"), []byte("barvalue")) - b2a := new(Batch) - b2a.seq = 10009 - b2a.Put([]byte("key1"), []byte("value1")) - b2a.Put([]byte("key2"), []byte("value2")) - b2a.Delete([]byte("key1")) - b2b := new(Batch) - b2b.Put([]byte("foo"), []byte("foovalue")) - b2b.Put([]byte("bar"), []byte("barvalue")) - b2a.append(b2b) - compareBatch(t, b1, b2a) -} - -func TestBatch_Size(t *testing.T) { - b := new(Batch) - for i := 0; i < 2; i++ { - b.Put([]byte("key1"), []byte("value1")) - b.Put([]byte("key2"), []byte("value2")) - b.Delete([]byte("key1")) - b.Put([]byte("foo"), []byte("foovalue")) - b.Put([]byte("bar"), []byte("barvalue")) - mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) - b.memReplay(mem) - if b.size() != mem.Size() { - t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) - } - b.Reset() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go deleted file mode 100644 index 0dd60fd..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
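The bench2_test.go file below drives concurrent iterator reads through testing.B.RunParallel; note that RunParallel only exists in Go 1.3 and later, so the file's `+build !go1.2` constraint looks inconsistent with the code it guards. A minimal standalone sketch of the RunParallel pattern used there:

package example

import (
	"sync/atomic"
	"testing"
)

// The benchmark body runs on one goroutine per GOMAXPROCS; each
// goroutine pulls iterations from the shared *testing.PB until the
// total of b.N iterations is exhausted.
func BenchmarkCounter(b *testing.B) {
	var n int64
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			atomic.AddInt64(&n, 1)
		}
	})
}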
- -// +build !go1.2 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 91b4267..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) 
- } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for ; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := 
p.db.NewIterator(nil, p.ro) - err := iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() - runtime.GOMAXPROCS(1) -} - -func BenchmarkDBWrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b 
*testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.gets() - p.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go deleted file mode 100644 index 175e222..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.2 - -package cache - -import ( - "math/rand" - "testing" - "time" -) - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index c2a5015..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package cache - -import ( - "math/rand" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" -) - -type int32o int32 - -func (o *int32o) acquire() { - if atomic.AddInt32((*int32)(o), 1) != 1 { - panic("BUG: invalid ref") - } -} - -func (o *int32o) Release() { - if atomic.AddInt32((*int32)(o), -1) != 0 { - panic("BUG: invalid ref") - } -} - -type releaserFunc struct { - fn func() - value Value -} - -func (r releaserFunc) Release() { - if r.fn != nil { - r.fn() - } -} - -func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { - return c.Get(ns, key, func() (int, Value) { - if relf != nil { - return charge, releaserFunc{relf, value} - } else { - return charge, value - } - }) -} - -func TestCacheMap(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - nsx := []struct { - nobjects, nhandles, concurrent, repeat int - }{ - {10000, 400, 50, 3}, - {100000, 1000, 100, 10}, - } - - var ( - objects [][]int32o - handles [][]unsafe.Pointer - ) - - for _, x := range nsx { - objects = append(objects, make([]int32o, x.nobjects)) - handles = append(handles, make([]unsafe.Pointer, x.nhandles)) - } - - c := NewCache(nil) - - wg := new(sync.WaitGroup) - var done int32 - - for ns, x := range nsx { - for i := 0; i < x.concurrent; i++ { - wg.Add(1) - go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for j := len(objects) * repeat; j >= 0; j-- { - key := uint64(r.Intn(len(objects))) - h := c.Get(uint64(ns), key, func() (int, Value) { - o := &objects[key] - o.acquire() - return 1, o - }) - if v := h.Value().(*int32o); v != &objects[key] { - t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) - } - if objects[key] != 1 { - t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) - } - if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { - h.Release() - } - } - }(ns, i, x.repeat,
objects[ns], handles[ns]) - } - - go func(handles []unsafe.Pointer) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for atomic.LoadInt32(&done) == 0 { - i := r.Intn(len(handles)) - h := (*Handle)(atomic.LoadPointer(&handles[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { - h.Release() - } - time.Sleep(time.Millisecond) - } - }(handles[ns]) - } - - go func() { - handles := make([]*Handle, 100000) - for atomic.LoadInt32(&done) == 0 { - for i := range handles { - handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { - return 1, 1 - }) - } - for _, h := range handles { - h.Release() - } - } - }() - - wg.Wait() - - atomic.StoreInt32(&done, 1) - - for _, handles0 := range handles { - for i := range handles0 { - h := (*Handle)(atomic.LoadPointer(&handles0[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { - h.Release() - } - } - } - - for ns, objects0 := range objects { - for i, o := range objects0 { - if o != 0 { - t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) - } - } - } -} - -func TestCacheMap_NodesAndSize(t *testing.T) { - c := NewCache(nil) - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } - set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 2, nil) - set(c, 1, 1, 3, 3, nil) - set(c, 2, 1, 4, 1, nil) - if c.Nodes() != 4 { - t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) - } - if c.Size() != 7 { - t.Errorf("invalid size counter: want=%d got=%d", 7, c.Size()) - } -} - -func TestLRUCache_Capacity(t *testing.T) { - c := NewCache(NewLRU(10)) - if c.Capacity() != 10 { - t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) - } - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 2, nil).Release() - set(c, 1, 1, 3, 3, nil).Release() - set(c, 2, 1, 4, 1, nil).Release() - set(c, 2, 2, 5, 1, nil).Release() - set(c, 2, 3, 6, 1, nil).Release() - set(c, 2, 4, 7, 1, nil).Release() - set(c, 2, 5, 8, 1, nil).Release() - if c.Nodes() != 7 { - t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) - } - if c.Size() != 10 { - t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) - } - c.SetCapacity(9) - if c.Capacity() != 9 { - t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) - } - if c.Nodes() != 6 { - t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) - } - if c.Size() != 8 { - t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) - } -} - -func TestCacheMap_NilValue(t *testing.T) { - c := NewCache(NewLRU(10)) - h := c.Get(0, 0, func() (size int, value Value) { - return 1, nil - }) - if h != nil { - t.Error("cache handle is non-nil") - } - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } -} - -func TestLRUCache_GetLatency(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - const ( - concurrentSet = 30 - concurrentGet = 3 - duration = 3 * time.Second - delay = 3 * time.Millisecond - maxkey = 100000 - ) - - var ( - set, getHit, getAll int32 - getMaxLatency, getDuration int64 - ) - - c := NewCache(NewLRU(5000)) - wg := &sync.WaitGroup{} - until := time.Now().Add(duration) - for i := 0; i < concurrentSet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for time.Now().Before(until) { - c.Get(0,
uint64(r.Intn(maxkey)), func() (int, Value) { - time.Sleep(delay) - atomic.AddInt32(&set, 1) - return 1, 1 - }).Release() - } - }(i) - } - for i := 0; i < concurrentGet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for { - mark := time.Now() - if mark.Before(until) { - h := c.Get(0, uint64(r.Intn(maxkey)), nil) - latency := int64(time.Now().Sub(mark)) - m := atomic.LoadInt64(&getMaxLatency) - if latency > m { - atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) - } - atomic.AddInt64(&getDuration, latency) - if h != nil { - atomic.AddInt32(&getHit, 1) - h.Release() - } - atomic.AddInt32(&getAll, 1) - } else { - break - } - } - }(i) - } - - wg.Wait() - getAvglatency := time.Duration(getDuration) / time.Duration(getAll) - t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", - set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) - - if getAvglatency > delay/3 { - t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) - } -} - -func TestLRUCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewCache(NewLRU(1000)) - for i, x := range cases { - set(c, 0, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j <= i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - c.Delete(0, x.key, func() { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j > i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func TestLRUCache_Eviction(t *testing.T) { - c := NewCache(NewLRU(12)) - o1 := set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 1, nil).Release() - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - set(c, 0, 5, 5, 1, nil).Release() - if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 - h.Release() - } - set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 - - for _, key := range []uint64{9, 2, 5, 1} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - o1.Release() - for _, key := range []uint64{1, 2, 5} { - h := 
c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - for _, key := range []uint64{3, 4, 9} { - h := c.Get(0, key, nil) - if h != nil { - t.Errorf("hit for key '%d'", key) - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } -} - -func TestLRUCache_Evict(t *testing.T) { - c := NewCache(NewLRU(6)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - set(c, 1, 1, 4, 1, nil).Release() - set(c, 1, 2, 5, 1, nil).Release() - set(c, 2, 1, 6, 1, nil).Release() - set(c, 2, 2, 7, 1, nil).Release() - - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d.%d return nil", ns, key) - } - } - } - - if ok := c.Evict(0, 1); !ok { - t.Error("first Cache.Evict on #0.1 return false") - } - if ok := c.Evict(0, 1); ok { - t.Error("second Cache.Evict on #0.1 return true") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) - } - - c.EvictNS(1) - if h := c.Get(1, 1, nil); h != nil { - t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value()) - } - if h := c.Get(1, 2, nil); h != nil { - t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) - } - - c.EvictAll() - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) - } - } - } -} - -func TestLRUCache_Delete(t *testing.T) { - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - - if ok := c.Delete(0, 1, delFunc); !ok { - t.Error("Cache.Delete on #1 return false") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) - } - if ok := c.Delete(0, 1, delFunc); ok { - t.Error("Cache.Delete on #1 return true") - } - - h2 := c.Get(0, 2, nil) - if h2 == nil { - t.Error("Cache.Get on #2 return nil") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(1) Cache.Delete on #2 return false") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(2) Cache.Delete on #2 return false") - } - - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - c.Get(0, 2, nil).Release() - - for key := 2; key <= 4; key++ { - if h := c.Get(0, uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d return nil", key) - } - } - - h2.Release() - if h := c.Get(0, 2, nil); h != nil { - t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) - } - - if delFuncCalled != 4 { - t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) - } -} - -func TestLRUCache_Close(t *testing.T) { - relFuncCalled := 0 - relFunc := func() { - relFuncCalled++ - } - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, relFunc).Release() - set(c, 0, 2, 2, 1, relFunc).Release() - - h3 := set(c, 0, 3, 3, 1, relFunc) - if h3 == nil { - t.Error("Cache.Get on #3 return nil") - } - if ok := c.Delete(0, 3, delFunc); !ok { - t.Error("Cache.Delete on #3 return false") - } - - c.Close() - - if relFuncCalled != 3 { - t.Errorf("relFunc isn't called 3 times: got=%d", 
relFuncCalled) - } - if delFuncCalled != 1 { - t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index a351874..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "io" - "math/rand" - "testing" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { - p := &h.dbHarness - t := p.t - - ff, _ := p.stor.GetFiles(ft) - sff := files(ff) - sff.sort() - if fi < 0 { - fi = len(sff) - 1 - } - if fi >= len(sff) { - t.Fatalf("no such file with type %q with index %d", ft, fi) - } - - file := sff[fi] - - r, err := file.Open() - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = file.Remove() - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := file.Create() - if err != nil { - t.Fatal("cannot create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - 
w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, f := range ff { - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := &h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } - err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, -1, 19, 1) - h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) - - h.close() -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(99, 99) - - h.close() -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, -2000, 500) - - h.openDB() - h.check(5000, 9999) - - h.close() -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) - h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) - - h.close() -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - 
- h.close() -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, -1, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") - - h.close() -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) - - h.close() -} - -func TestCorruptDB_UnrelatedKeys(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) - - h.close() -} - -func TestCorruptDB_RecoverTable(t *testing.T) { - h := newDbCorruptHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - Filter: filter.NewBloomFilter(10), - }) - - h.build(1000) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - seq := h.db.seq - h.closeDB() - h.corrupt(storage.TypeTable, 0, 1000, 1) - h.corrupt(storage.TypeTable, 3, 10000, 1) - // Corrupted filter shouldn't affect recovery. - h.corrupt(storage.TypeTable, 3, 113888, 10) - h.corrupt(storage.TypeTable, -1, 20000, 1) - - h.recover() - if h.db.seq != seq { - t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) - } - h.check(985, 985) - - h.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index 9d91ebf..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,2701 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
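The corruption tests above restore damaged stores through the harness's recover() helper, which calls leveldb.Recover on the test storage; the path-based equivalent in the public API is RecoverFile, which discards the manifest and rebuilds it from whatever table files are still readable. A minimal sketch (the path is a placeholder):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// RecoverFile ignores the existing (possibly corrupted) manifest
	// and reconstructs it by scanning the table files on disk.
	db, err := leveldb.RecoverFile("/tmp/damaged-db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// After recovery the DB can be used normally.
	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		log.Fatal(err)
	}
}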
- -package leveldb - -import ( - "bytes" - "container/list" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -type dbHarness struct { - t *testing.T - - stor *testStorage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - h.t = t - h.stor = newTestStorage(t) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. - defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - h.closeDB() - h.openDB() -} - -func (h *dbHarness) close() { - h.closeDB0() - h.db = nil - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { - t := h.t - db := h.db - - var ( - maxOverlaps uint64 - maxLevel int - ) - v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - maxOverlaps = sum - maxLevel = level - } - } - } - v.release() - - if maxOverlaps > want { - t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) - } else { - t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) - } -} - -func 
(h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := newIkey([]byte(key), kMaxSeq, ktVal) - iter := db.newRawIterator(nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil { - if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch kt { - case ktVal: - res += string(iter.Value()) - case ktDel: - res += "DEL" - } - } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". 
-func (h *dbHarness) getKeyVal(want string) {
-	t := h.t
-	db := h.db
-
-	s, err := db.GetSnapshot()
-	if err != nil {
-		t.Fatal("GetSnapshot: got error: ", err)
-	}
-	res := ""
-	iter := s.NewIterator(nil, nil)
-	for iter.Next() {
-		res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value()))
-	}
-	iter.Release()
-
-	if res != want {
-		t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want)
-	}
-	s.Release()
-}
-
-func (h *dbHarness) waitCompaction() {
-	t := h.t
-	db := h.db
-	if err := db.compSendIdle(db.tcompCmdC); err != nil {
-		t.Error("compaction error: ", err)
-	}
-}
-
-func (h *dbHarness) waitMemCompaction() {
-	t := h.t
-	db := h.db
-
-	if err := db.compSendIdle(db.mcompCmdC); err != nil {
-		t.Error("compaction error: ", err)
-	}
-}
-
-func (h *dbHarness) compactMem() {
-	t := h.t
-	db := h.db
-
-	t.Log("starting memdb compaction")
-
-	db.writeLockC <- struct{}{}
-	defer func() {
-		<-db.writeLockC
-	}()
-
-	if _, err := db.rotateMem(0); err != nil {
-		t.Error("compaction error: ", err)
-	}
-	if err := db.compSendIdle(db.mcompCmdC); err != nil {
-		t.Error("compaction error: ", err)
-	}
-
-	if h.totalTables() == 0 {
-		t.Error("zero tables after mem compaction")
-	}
-
-	t.Log("memdb compaction done")
-}
-
-func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
-	t := h.t
-	db := h.db
-
-	var _min, _max []byte
-	if min != "" {
-		_min = []byte(min)
-	}
-	if max != "" {
-		_max = []byte(max)
-	}
-
-	t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
-
-	if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
-		if wanterr {
-			t.Log("CompactRangeAt: got error (expected): ", err)
-		} else {
-			t.Error("CompactRangeAt: got error: ", err)
-		}
-	} else if wanterr {
-		t.Error("CompactRangeAt: expect error")
-	}
-
-	t.Log("table range compaction done")
-}
-
-func (h *dbHarness) compactRangeAt(level int, min, max string) {
-	h.compactRangeAtErr(level, min, max, false)
-}
-
-func (h *dbHarness) compactRange(min, max string) {
-	t := h.t
-	db := h.db
-
-	t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
-
-	var r util.Range
-	if min != "" {
-		r.Start = []byte(min)
-	}
-	if max != "" {
-		r.Limit = []byte(max)
-	}
-	if err := db.CompactRange(r); err != nil {
-		t.Error("CompactRange: got error: ", err)
-	}
-
-	t.Log("DB range compaction done")
-}
-
-func (h *dbHarness) sizeOf(start, limit string) uint64 {
-	sz, err := h.db.SizeOf([]util.Range{
-		{[]byte(start), []byte(limit)},
-	})
-	if err != nil {
-		h.t.Error("SizeOf: got error: ", err)
-	}
-	return sz.Sum()
-}
-
-func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
-	sz := h.sizeOf(start, limit)
-	if sz < low || sz > hi {
-		h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
-			shorten(start), shorten(limit), low, hi, sz)
-	}
-}
-
-func (h *dbHarness) getSnapshot() (s *Snapshot) {
-	s, err := h.db.GetSnapshot()
-	if err != nil {
-		h.t.Fatal("GetSnapshot: got error: ", err)
-	}
-	return
-}
-func (h *dbHarness) tablesPerLevel(want string) {
-	res := ""
-	nz := 0
-	v := h.db.s.version()
-	for level, tt := range v.tables {
-		if level > 0 {
-			res += ","
-		}
-		res += fmt.Sprint(len(tt))
-		if len(tt) > 0 {
-			nz = len(res)
-		}
-	}
-	v.release()
-	res = res[:nz]
-	if res != want {
-		h.t.Errorf("invalid tables len, want=%s, got=%s", want, res)
-	}
-}
-
-func (h *dbHarness) totalTables() (n int) {
-	v := h.db.s.version()
-	for _, tt := range v.tables {
-		n += len(tt)
-	}
-	v.release()
-	return
-}
-
-type keyValue interface {
-	Key() []byte
-	Value() []byte
-}
-
-func testKeyVal(t *testing.T, kv keyValue, want string) {
-	res := string(kv.Key()) + "->" + string(kv.Value())
-	if res != want {
-		t.Errorf("invalid key/value, want=%q, got=%q", want, res)
-	}
-}
-
-func numKey(num int) string {
-	return fmt.Sprintf("key%06d", num)
-}
-
-var _bloom_filter = filter.NewBloomFilter(10)
-
-func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) {
-	for i := 0; i < 4; i++ {
-		func() {
-			switch i {
-			case 0:
-			case 1:
-				if o == nil {
-					o = &opt.Options{Filter: _bloom_filter}
-				} else {
-					old := o
-					o = &opt.Options{}
-					*o = *old
-					o.Filter = _bloom_filter
-				}
-			case 2:
-				if o == nil {
-					o = &opt.Options{Compression: opt.NoCompression}
-				} else {
-					old := o
-					o = &opt.Options{}
-					*o = *old
-					o.Compression = opt.NoCompression
-				}
-			}
-			h := newDbHarnessWopt(t, o)
-			defer h.close()
-			switch i {
-			case 3:
-				h.reopenDB()
-			}
-			f(h)
-		}()
-	}
-}
-
-func trun(t *testing.T, f func(h *dbHarness)) {
-	truno(t, nil, f)
-}
-
-func testAligned(t *testing.T, name string, offset uintptr) {
-	if offset%8 != 0 {
-		t.Errorf("field %s offset is not 64-bit aligned", name)
-	}
-}
-
-func Test_FieldsAligned(t *testing.T) {
-	p1 := new(DB)
-	testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
-	p2 := new(session)
-	testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
-	testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
-	testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
-	testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
-}
-
-func TestDB_Locking(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.stor.Close()
-	h.openAssert(false)
-	h.closeDB()
-	h.openAssert(true)
-}
-
-func TestDB_Empty(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.get("foo", false)
-
-		h.reopenDB()
-		h.get("foo", false)
-	})
-}
-
-func TestDB_ReadWrite(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("foo", "v1")
-		h.getVal("foo", "v1")
-		h.put("bar", "v2")
-		h.put("foo", "v3")
-		h.getVal("foo", "v3")
-		h.getVal("bar", "v2")
-
-		h.reopenDB()
-		h.getVal("foo", "v3")
-		h.getVal("bar", "v2")
-	})
-}
-
-func TestDB_PutDeleteGet(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("foo", "v1")
-		h.getVal("foo", "v1")
-		h.put("foo", "v2")
-		h.getVal("foo", "v2")
-		h.delete("foo")
-		h.get("foo", false)
-
-		h.reopenDB()
-		h.get("foo", false)
-	})
-}
-
-func TestDB_EmptyBatch(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.get("foo", false)
-	err := h.db.Write(new(Batch), h.wo)
-	if err != nil {
-		t.Error("writing empty batch yielded error: ", err)
-	}
-	h.get("foo", false)
-}
-
-func TestDB_GetFromFrozen(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
-	defer h.close()
-
-	h.put("foo", "v1")
-	h.getVal("foo", "v1")
-
-	h.stor.DelaySync(storage.TypeTable)      // Block sync calls
-	h.put("k1", strings.Repeat("x", 100000)) // Fill memtable
-	h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction
-	for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ {
-		time.Sleep(10 * time.Microsecond)
-	}
-	if h.db.getFrozenMem() == nil {
-		h.stor.ReleaseSync(storage.TypeTable)
-		t.Fatal("No frozen mem")
-	}
-	h.getVal("foo", "v1")
-	h.stor.ReleaseSync(storage.TypeTable) // Release sync calls
-
-	h.reopenDB()
-	h.getVal("foo", "v1")
-	h.get("k1", true)
-	h.get("k2", true)
-}
-
-func TestDB_GetFromTable(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("foo", "v1")
-		h.compactMem()
-		h.getVal("foo", "v1")
-	})
-}
-
-func TestDB_GetSnapshot(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		bar := strings.Repeat("b", 200)
-		h.put("foo", "v1")
-		h.put(bar, "v1")
-
-		snap, err := h.db.GetSnapshot()
-		if err != nil {
-			t.Fatal("GetSnapshot: got error: ", err)
-		}
-
-		h.put("foo", "v2")
-		h.put(bar, "v2")
-
-		h.getVal("foo", "v2")
-		h.getVal(bar, "v2")
-		h.getValr(snap, "foo", "v1")
-		h.getValr(snap, bar, "v1")
-
-		h.compactMem()
-
-		h.getVal("foo", "v2")
-		h.getVal(bar, "v2")
-		h.getValr(snap, "foo", "v1")
-		h.getValr(snap, bar, "v1")
-
-		snap.Release()
-
-		h.reopenDB()
-		h.getVal("foo", "v2")
-		h.getVal(bar, "v2")
-	})
-}
-
-func TestDB_GetLevel0Ordering(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		for i := 0; i < 4; i++ {
-			h.put("bar", fmt.Sprintf("b%d", i))
-			h.put("foo", fmt.Sprintf("v%d", i))
-			h.compactMem()
-		}
-		h.getVal("foo", "v3")
-		h.getVal("bar", "b3")
-
-		v := h.db.s.version()
-		t0len := v.tLen(0)
-		v.release()
-		if t0len < 2 {
-			t.Errorf("level-0 tables is less than 2, got %d", t0len)
-		}
-
-		h.reopenDB()
-		h.getVal("foo", "v3")
-		h.getVal("bar", "b3")
-	})
-}
-
-func TestDB_GetOrderedByLevels(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("foo", "v1")
-		h.compactMem()
-		h.compactRange("a", "z")
-		h.getVal("foo", "v1")
-		h.put("foo", "v2")
-		h.compactMem()
-		h.getVal("foo", "v2")
-	})
-}
-
-func TestDB_GetPicksCorrectFile(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		// Arrange to have multiple files in a non-level-0 level.
-		h.put("a", "va")
-		h.compactMem()
-		h.compactRange("a", "b")
-		h.put("x", "vx")
-		h.compactMem()
-		h.compactRange("x", "y")
-		h.put("f", "vf")
-		h.compactMem()
-		h.compactRange("f", "g")
-
-		h.getVal("a", "va")
-		h.getVal("f", "vf")
-		h.getVal("x", "vx")
-
-		h.compactRange("", "")
-		h.getVal("a", "va")
-		h.getVal("f", "vf")
-		h.getVal("x", "vx")
-	})
-}
-
-func TestDB_GetEncountersEmptyLevel(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		// Arrange for the following to happen:
-		//   * sstable A in level 0
-		//   * nothing in level 1
-		//   * sstable B in level 2
-		// Then do enough Get() calls to arrange for an automatic compaction
-		// of sstable A. A bug would cause the compaction to be marked as
-		// occurring at level 1 (instead of the correct level 0).
-
-		// Step 1: First place sstables in levels 0 and 2
-		for i := 0; ; i++ {
-			if i >= 100 {
-				t.Fatal("could not fill levels-0 and level-2")
-			}
-			v := h.db.s.version()
-			if v.tLen(0) > 0 && v.tLen(2) > 0 {
-				v.release()
-				break
-			}
-			v.release()
-			h.put("a", "begin")
-			h.put("z", "end")
-			h.compactMem()
-
-			h.getVal("a", "begin")
-			h.getVal("z", "end")
-		}
-
-		// Step 2: clear level 1 if necessary.
-		h.compactRangeAt(1, "", "")
-		h.tablesPerLevel("1,0,1")
-
-		h.getVal("a", "begin")
-		h.getVal("z", "end")
-
-		// Step 3: read a bunch of times
-		for i := 0; i < 200; i++ {
-			h.get("missing", false)
-		}
-
-		// Step 4: Wait for compaction to finish
-		h.waitCompaction()
-
-		v := h.db.s.version()
-		if v.tLen(0) > 0 {
-			t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
-		}
-		v.release()
-
-		h.getVal("a", "begin")
-		h.getVal("z", "end")
-	})
-}
-
-func TestDB_IterMultiWithDelete(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("a", "va")
-		h.put("b", "vb")
-		h.put("c", "vc")
-		h.delete("b")
-		h.get("b", false)
-
-		iter := h.db.NewIterator(nil, nil)
-		iter.Seek([]byte("c"))
-		testKeyVal(t, iter, "c->vc")
-		iter.Prev()
-		testKeyVal(t, iter, "a->va")
-		iter.Release()
-
-		h.compactMem()
-
-		iter = h.db.NewIterator(nil, nil)
-		iter.Seek([]byte("c"))
-		testKeyVal(t, iter, "c->vc")
-		iter.Prev()
-		testKeyVal(t, iter, "a->va")
-		iter.Release()
-	})
-}
-
-func TestDB_IteratorPinsRef(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.put("foo", "hello")
-
-	// Get iterator that will yield the current contents of the DB.
-	iter := h.db.NewIterator(nil, nil)
-
-	// Write to force compactions
-	h.put("foo", "newvalue1")
-	for i := 0; i < 100; i++ {
-		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
-	}
-	h.put("foo", "newvalue2")
-
-	iter.First()
-	testKeyVal(t, iter, "foo->hello")
-	if iter.Next() {
-		t.Errorf("expect eof")
-	}
-	iter.Release()
-}
-
-func TestDB_Recover(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("foo", "v1")
-		h.put("baz", "v5")
-
-		h.reopenDB()
-		h.getVal("foo", "v1")
-
-		h.getVal("foo", "v1")
-		h.getVal("baz", "v5")
-		h.put("bar", "v2")
-		h.put("foo", "v3")
-
-		h.reopenDB()
-		h.getVal("foo", "v3")
-		h.put("foo", "v4")
-		h.getVal("foo", "v4")
-		h.getVal("bar", "v2")
-		h.getVal("baz", "v5")
-	})
-}
-
-func TestDB_RecoverWithEmptyJournal(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		h.put("foo", "v1")
-		h.put("foo", "v2")
-
-		h.reopenDB()
-		h.reopenDB()
-		h.put("foo", "v3")
-
-		h.reopenDB()
-		h.getVal("foo", "v3")
-	})
-}
-
-func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
-	truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
-
-		h.stor.DelaySync(storage.TypeTable)
-		h.put("big1", strings.Repeat("x", 10000000))
-		h.put("big2", strings.Repeat("y", 1000))
-		h.put("bar", "v2")
-		h.stor.ReleaseSync(storage.TypeTable)
-
-		h.reopenDB()
-		h.getVal("bar", "v2")
-		h.getVal("big1", strings.Repeat("x", 10000000))
-		h.getVal("big2", strings.Repeat("y", 1000))
-	})
-}
-
-func TestDB_MinorCompactionsHappen(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
-	defer h.close()
-
-	n := 500
-
-	key := func(i int) string {
-		return fmt.Sprintf("key%06d", i)
-	}
-
-	for i := 0; i < n; i++ {
-		h.put(key(i), key(i)+strings.Repeat("v", 1000))
-	}
-
-	for i := 0; i < n; i++ {
-		h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
-	}
-
-	h.reopenDB()
-	for i := 0; i < n; i++ {
-		h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
-	}
-}
-
-func TestDB_RecoverWithLargeJournal(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.put("big1", strings.Repeat("1", 200000))
-	h.put("big2", strings.Repeat("2", 200000))
-	h.put("small3", strings.Repeat("3", 10))
-	h.put("small4", strings.Repeat("4", 10))
-	h.tablesPerLevel("")
-
-	// Make sure that if we re-open with a small write buffer size that
-	// we flush table files in the middle of a large journal file.
-	h.o.WriteBuffer = 100000
-	h.reopenDB()
-	h.getVal("big1", strings.Repeat("1", 200000))
-	h.getVal("big2", strings.Repeat("2", 200000))
-	h.getVal("small3", strings.Repeat("3", 10))
-	h.getVal("small4", strings.Repeat("4", 10))
-	v := h.db.s.version()
-	if v.tLen(0) <= 1 {
-		t.Errorf("level-0 tables not greater than one, got %d", v.tLen(0))
-	}
-	v.release()
-}
-
-func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{
-		WriteBuffer: 10000000,
-		Compression: opt.NoCompression,
-	})
-	defer h.close()
-
-	v := h.db.s.version()
-	if v.tLen(0) > 0 {
-		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
-	}
-	v.release()
-
-	n := 80
-
-	// Write 8MB (80 values, each 100K)
-	for i := 0; i < n; i++ {
-		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
-	}
-
-	// Reopening moves updates to level-0
-	h.reopenDB()
-	h.compactRangeAt(0, "", "")
-
-	v = h.db.s.version()
-	if v.tLen(0) > 0 {
-		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
-	}
-	if v.tLen(1) <= 1 {
-		t.Errorf("level-1 tables less than 1, got %d", v.tLen(1))
-	}
-	v.release()
-
-	for i := 0; i < n; i++ {
-		h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
-	}
-}
-
-func TestDB_RepeatedWritesToSameKey(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
-	defer h.close()
-
-	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
-
-	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
-	for i := 0; i < 5*maxTables; i++ {
-		h.put("key", value)
-		n := h.totalTables()
-		if n > maxTables {
-			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
-		}
-	}
-}
-
-func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
-	defer h.close()
-
-	h.reopenDB()
-
-	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
-
-	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
-	for i := 0; i < 5*maxTables; i++ {
-		h.put("key", value)
-		n := h.totalTables()
-		if n > maxTables {
-			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
-		}
-	}
-}
-
-func TestDB_SparseMerge(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
-	defer h.close()
-
-	h.putMulti(h.o.GetNumLevel(), "A", "Z")
-
-	// Suppose there is:
-	//    small amount of data with prefix A
-	//    large amount of data with prefix B
-	//    small amount of data with prefix C
-	// and that recent updates have made small changes to all three prefixes.
-	// Check that we do not do a compaction that merges all of B in one shot.
- h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDB_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) - } - - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - sizes := []uint64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x uint64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDB_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDB_SnapshotList(t *testing.T) { - db := &DB{snapsList: list.New()} - e0a := db.acquireSnapshot() - e0b := db.acquireSnapshot() - db.seq = 1 - e1 := db.acquireSnapshot() - db.seq = 2 - e2 := db.acquireSnapshot() - - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0a) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - 
db.releaseSnapshot(e0b) - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - e2 = db.acquireSnapshot() - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e1) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } -} - -func TestDB_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h *dbHarness) { - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). - h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDB_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
-	h.allEntriesFor("foo", "[ ]")
-}
-
-func TestDB_CompactionTableOpenError(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
-	defer h.close()
-
-	im := 10
-	jm := 10
-	for r := 0; r < 2; r++ {
-		for i := 0; i < im; i++ {
-			for j := 0; j < jm; j++ {
-				h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
-			}
-			h.compactMem()
-		}
-	}
-
-	if n := h.totalTables(); n != im*2 {
-		t.Errorf("total tables is %d, want %d", n, im*2)
-	}
-
-	h.stor.SetEmuErr(storage.TypeTable, tsOpOpen)
-	go h.db.CompactRange(util.Range{})
-	if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
-		t.Log("compaction error: ", err)
-	}
-	h.closeDB0()
-	h.openDB()
-	h.stor.SetEmuErr(0, tsOpOpen)
-
-	for i := 0; i < im; i++ {
-		for j := 0; j < jm; j++ {
-			h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
-		}
-	}
-}
-
-func TestDB_OverlapInLevel0(t *testing.T) {
-	trun(t, func(h *dbHarness) {
-		if h.o.GetMaxMemCompationLevel() != 2 {
-			t.Fatal("fix test to reflect the config")
-		}
-
-		// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
-		h.put("100", "v100")
-		h.put("999", "v999")
-		h.compactMem()
-		h.delete("100")
-		h.delete("999")
-		h.compactMem()
-		h.tablesPerLevel("0,1,1")
-
-		// Make files spanning the following ranges in level-0:
-		//   files[0]  200 .. 900
-		//   files[1]  300 .. 500
-		// Note that files are sorted by min key.
-		h.put("300", "v300")
-		h.put("500", "v500")
-		h.compactMem()
-		h.put("200", "v200")
-		h.put("600", "v600")
-		h.put("900", "v900")
-		h.compactMem()
-		h.tablesPerLevel("2,1,1")
-
-		// Compact away the placeholder files we created initially
-		h.compactRangeAt(1, "", "")
-		h.compactRangeAt(2, "", "")
-		h.tablesPerLevel("2")
-
-		// Do a memtable compaction. Before bug-fix, the compaction would
-		// not detect the overlap with level-0 files and would incorrectly place
-		// the deletion in a deeper level.
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDB_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDB_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite) - } else { - h.stor.SetEmuErr(storage.TypeManifest, tsOpSync) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true) - - h.db.Close() - h.stor.SetEmuErr(0, tsOpWrite) - h.stor.SetEmuErr(0, tsOpSync) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDB_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, 
true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDB_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDB_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDB_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - const n = 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. - h.stor.SetReadCounter(storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt := int(h.stor.ReadCounter()) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. 
-	h.stor.ResetReadCounter()
-	for i := 0; i < n; i++ {
-		h.get(key(i)+".missing", false)
-	}
-	cnt = int(h.stor.ReadCounter())
-	t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
-	if max := 3 * n / 100; cnt > max {
-		t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
-	}
-
-	h.stor.ReleaseSync(storage.TypeTable)
-}
-
-func TestDB_Concurrent(t *testing.T) {
-	const n, secs, maxkey = 4, 2, 1000
-
-	runtime.GOMAXPROCS(n)
-	trun(t, func(h *dbHarness) {
-		var closeWg sync.WaitGroup
-		var stop uint32
-		var cnt [n]uint32
-
-		for i := 0; i < n; i++ {
-			closeWg.Add(1)
-			go func(i int) {
-				var put, get, found uint
-				defer func() {
-					t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d",
-						i, cnt[i], put, get, found, get-found)
-					closeWg.Done()
-				}()
-
-				rnd := rand.New(rand.NewSource(int64(1000 + i)))
-				for atomic.LoadUint32(&stop) == 0 {
-					x := cnt[i]
-
-					k := rnd.Intn(maxkey)
-					kstr := fmt.Sprintf("%016d", k)
-
-					if (rnd.Int() % 2) > 0 {
-						put++
-						h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x))
-					} else {
-						get++
-						v, err := h.db.Get([]byte(kstr), h.ro)
-						if err == nil {
-							found++
-							rk, ri, rx := 0, -1, uint32(0)
-							fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx)
-							if rk != k {
-								t.Errorf("invalid key want=%d got=%d", k, rk)
-							}
-							if ri < 0 || ri >= n {
-								t.Error("invalid goroutine number: ", ri)
-							} else {
-								tx := atomic.LoadUint32(&(cnt[ri]))
-								if rx > tx {
-									t.Errorf("invalid seq number, %d > %d ", rx, tx)
-								}
-							}
-						} else if err != ErrNotFound {
-							t.Error("Get: got error: ", err)
-							return
-						}
-					}
-					atomic.AddUint32(&cnt[i], 1)
-				}
-			}(i)
-		}
-
-		time.Sleep(secs * time.Second)
-		atomic.StoreUint32(&stop, 1)
-		closeWg.Wait()
-	})
-
-	runtime.GOMAXPROCS(1)
-}
-
-func TestDB_Concurrent2(t *testing.T) {
-	const n, n2 = 4, 4000
-
-	runtime.GOMAXPROCS(n*2 + 2)
-	truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) {
-		var closeWg sync.WaitGroup
-		var stop uint32
-
-		for i := 0; i < n; i++ {
-			closeWg.Add(1)
-			go func(i int) {
-				for k := 0; atomic.LoadUint32(&stop) == 0; k++ {
-					h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
-				}
-				closeWg.Done()
-			}(i)
-		}
-
-		for i := 0; i < n; i++ {
-			closeWg.Add(1)
-			go func(i int) {
-				for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- {
-					h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
-				}
-				closeWg.Done()
-			}(i)
-		}
-
-		cmp := comparer.DefaultComparer
-		for i := 0; i < n2; i++ {
-			closeWg.Add(1)
-			go func(i int) {
-				it := h.db.NewIterator(nil, nil)
-				var pk []byte
-				for it.Next() {
-					kk := it.Key()
-					if cmp.Compare(kk, pk) <= 0 {
-						t.Errorf("iter %d: %q is successor of %q", i, pk, kk)
-					}
-					pk = append(pk[:0], kk...)
-					var k, vk, vi int
-					if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil {
-						t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err)
-					} else if n < 1 {
-						t.Errorf("iter %d: Cannot parse key %q", i, it.Key())
-					}
-					if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil {
-						t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err)
-					} else if n < 2 {
-						t.Errorf("iter %d: Cannot parse value %q", i, it.Value())
-					}
-
-					if vk != k {
-						t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk)
-					}
-				}
-				if err := it.Error(); err != nil {
-					t.Errorf("iter %d: Got error: %v", i, err)
-				}
-				it.Release()
-				closeWg.Done()
-			}(i)
-		}
-
-		atomic.StoreUint32(&stop, 1)
-		closeWg.Wait()
-	})
-
-	runtime.GOMAXPROCS(1)
-}
-
-func TestDB_CreateReopenDbOnFile(t *testing.T) {
-	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
-	if err := os.RemoveAll(dbpath); err != nil {
-		t.Fatal("cannot remove old db: ", err)
-	}
-	defer os.RemoveAll(dbpath)
-
-	for i := 0; i < 3; i++ {
-		stor, err := storage.OpenFile(dbpath)
-		if err != nil {
-			t.Fatalf("(%d) cannot open storage: %s", i, err)
-		}
-		db, err := Open(stor, nil)
-		if err != nil {
-			t.Fatalf("(%d) cannot open db: %s", i, err)
-		}
-		if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
-			t.Fatalf("(%d) cannot write to db: %s", i, err)
-		}
-		if err := db.Close(); err != nil {
-			t.Fatalf("(%d) cannot close db: %s", i, err)
-		}
-		if err := stor.Close(); err != nil {
-			t.Fatalf("(%d) cannot close storage: %s", i, err)
-		}
-	}
-}
-
-func TestDB_CreateReopenDbOnFile2(t *testing.T) {
-	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
-	if err := os.RemoveAll(dbpath); err != nil {
-		t.Fatal("cannot remove old db: ", err)
-	}
-	defer os.RemoveAll(dbpath)
-
-	for i := 0; i < 3; i++ {
-		db, err := OpenFile(dbpath, nil)
-		if err != nil {
-			t.Fatalf("(%d) cannot open db: %s", i, err)
-		}
-		if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
-			t.Fatalf("(%d) cannot write to db: %s", i, err)
-		}
-		if err := db.Close(); err != nil {
-			t.Fatalf("(%d) cannot close db: %s", i, err)
-		}
-	}
-}
-
-func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.put("foo", "v1")
-	h.compactMem()
-	h.delete("foo")
-	h.get("foo", false)
-	h.getKeyVal("")
-}
-
-func TestDB_LeveldbIssue178(t *testing.T) {
-	nKeys := (opt.DefaultCompactionTableSize / 30) * 5
-	key1 := func(i int) string {
-		return fmt.Sprintf("my_key_%d", i)
-	}
-	key2 := func(i int) string {
-		return fmt.Sprintf("my_key_%d_xxx", i)
-	}
-
-	// Disable compression since it affects the creation of layers and the
-	// code below is trying to test against a very specific scenario.
-	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
-	defer h.close()
-
-	// Create first key range.
-	batch := new(Batch)
-	for i := 0; i < nKeys; i++ {
-		batch.Put([]byte(key1(i)), []byte("value for range 1 key"))
-	}
-	h.write(batch)
-
-	// Create second key range.
-	batch.Reset()
-	for i := 0; i < nKeys; i++ {
-		batch.Put([]byte(key2(i)), []byte("value for range 2 key"))
-	}
-	h.write(batch)
-
-	// Delete second key range.
-	batch.Reset()
-	for i := 0; i < nKeys; i++ {
-		batch.Delete([]byte(key2(i)))
-	}
-	h.write(batch)
-	h.waitMemCompaction()
-
-	// Run manual compaction.
-	h.compactRange(key1(0), key1(nKeys-1))
-
-	// Checking the keys.
-	h.assertNumKeys(nKeys)
-}
-
-func TestDB_LeveldbIssue200(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.put("1", "b")
-	h.put("2", "c")
-	h.put("3", "d")
-	h.put("4", "e")
-	h.put("5", "f")
-
-	iter := h.db.NewIterator(nil, h.ro)
-
-	// Add an element that should not be reflected in the iterator.
-	h.put("25", "cd")
-
-	iter.Seek([]byte("5"))
-	assertBytes(t, []byte("5"), iter.Key())
-	iter.Prev()
-	assertBytes(t, []byte("4"), iter.Key())
-	iter.Prev()
-	assertBytes(t, []byte("3"), iter.Key())
-	iter.Next()
-	assertBytes(t, []byte("4"), iter.Key())
-	iter.Next()
-	assertBytes(t, []byte("5"), iter.Key())
-}
-
-func TestDB_GoleveldbIssue74(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{
-		WriteBuffer: 1 * opt.MiB,
-	})
-	defer h.close()
-
-	const n, dur = 10000, 5 * time.Second
-
-	runtime.GOMAXPROCS(runtime.NumCPU())
-
-	until := time.Now().Add(dur)
-	wg := new(sync.WaitGroup)
-	wg.Add(2)
-	var done uint32
-	go func() {
-		var i int
-		defer func() {
-			t.Logf("WRITER DONE #%d", i)
-			atomic.StoreUint32(&done, 1)
-			wg.Done()
-		}()
-
-		b := new(Batch)
-		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
-			iv := fmt.Sprintf("VAL%010d", i)
-			for k := 0; k < n; k++ {
-				key := fmt.Sprintf("KEY%06d", k)
-				b.Put([]byte(key), []byte(key+iv))
-				b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
-			}
-			h.write(b)
-
-			b.Reset()
-			snap := h.getSnapshot()
-			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
-			var k int
-			for ; iter.Next(); k++ {
-				ptrKey := iter.Key()
-				key := iter.Value()
-
-				if _, err := snap.Get(ptrKey, nil); err != nil {
-					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
-				}
-				if value, err := snap.Get(key, nil); err != nil {
-					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
-				} else if string(value) != string(key)+iv {
-					t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
-				}
-
-				b.Delete(key)
-				b.Delete(ptrKey)
-			}
-			h.write(b)
-			iter.Release()
-			snap.Release()
-			if k != n {
-				t.Fatalf("#%d %d != %d", i, k, n)
-			}
-		}
-	}()
-	go func() {
-		var i int
-		defer func() {
-			t.Logf("READER DONE #%d", i)
-			atomic.StoreUint32(&done, 1)
-			wg.Done()
-		}()
-		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
-			snap := h.getSnapshot()
-			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
-			var prevValue string
-			var k int
-			for ; iter.Next(); k++ {
-				ptrKey := iter.Key()
-				key := iter.Value()
-
-				if _, err := snap.Get(ptrKey, nil); err != nil {
-					t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
-				}
-
-				if value, err := snap.Get(key, nil); err != nil {
-					t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
-				} else if prevValue != "" && string(value) != string(key)+prevValue {
-					t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
-				} else {
-					prevValue = string(value[len(key):])
-				}
-			}
-			iter.Release()
-			snap.Release()
-			if k > 0 && k != n {
-				t.Fatalf("#%d %d != %d", i, k, n)
-			}
-		}
-	}()
-	wg.Wait()
-}
-
-func TestDB_GetProperties(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	_, err := h.db.GetProperty("leveldb.num-files-at-level")
-	if err == nil {
-		t.Error("GetProperty() failed to detect missing level")
-	}
-
-	_, err = h.db.GetProperty("leveldb.num-files-at-level0")
-	if err != nil {
-		t.Error("got unexpected error", err)
-	}
-
-	_, err = h.db.GetProperty("leveldb.num-files-at-level0x")
-	if err == nil {
-		t.Error("GetProperty() failed to detect invalid level")
-	}
-}
-
-func TestDB_GoleveldbIssue72and83(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{
-		WriteBuffer:            1 * opt.MiB,
-		OpenFilesCacheCapacity: 3,
-	})
-	defer h.close()
-
-	const n, wn, dur = 10000, 100, 30 * time.Second
-
-	runtime.GOMAXPROCS(runtime.NumCPU())
-
-	randomData := func(prefix byte, i int) []byte {
-		data := make([]byte, 1+4+32+64+32)
-		_, err := crand.Reader.Read(data[1 : len(data)-8])
-		if err != nil {
-			panic(err)
-		}
-		data[0] = prefix
-		binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
-		binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
-		return data
-	}
-
-	keys := make([][]byte, n)
-	for i := range keys {
-		keys[i] = randomData(1, 0)
-	}
-
-	until := time.Now().Add(dur)
-	wg := new(sync.WaitGroup)
-	wg.Add(3)
-	var done uint32
-	go func() {
-		i := 0
-		defer func() {
-			t.Logf("WRITER DONE #%d", i)
-			wg.Done()
-		}()
-
-		b := new(Batch)
-		for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
-			b.Reset()
-			for _, k1 := range keys {
-				k2 := randomData(2, i)
-				b.Put(k2, randomData(42, i))
-				b.Put(k1, k2)
-			}
-			if err := h.db.Write(b, h.wo); err != nil {
-				atomic.StoreUint32(&done, 1)
-				t.Fatalf("WRITER #%d db.Write: %v", i, err)
-			}
-		}
-	}()
-	go func() {
-		var i int
-		defer func() {
-			t.Logf("READER0 DONE #%d", i)
-			atomic.StoreUint32(&done, 1)
-			wg.Done()
-		}()
-		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
-			snap := h.getSnapshot()
-			seq := snap.elem.seq
-			if seq == 0 {
-				snap.Release()
-				continue
-			}
-			iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
-			writei := int(seq/(n*2) - 1)
-			var k int
-			for ; iter.Next(); k++ {
-				k1 := iter.Key()
-				k2 := iter.Value()
-				k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
-				k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
-				if k1checksum0 != k1checksum1 {
-					t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, writei, k1checksum0, k1checksum1)
-				}
-				k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
-				k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
-				if k2checksum0 != k2checksum1 {
-					t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, writei, k2checksum0, k2checksum1)
-				}
-				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
-				if writei != kwritei {
-					t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
-				}
-				if _, err := snap.Get(k2, nil); err != nil {
-					t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
-				}
-			}
-			if err := iter.Error(); err != nil {
-				t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
-			}
-			iter.Release()
-			snap.Release()
-			if k > 0 && k != n {
-				t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
-			}
-		}
-	}()
-	go func() {
-		var i int
-		defer func() {
-			t.Logf("READER1 DONE #%d", i)
-			atomic.StoreUint32(&done, 1)
-			wg.Done()
-		}()
-		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
-			iter := h.db.NewIterator(nil, nil)
-			seq := iter.(*dbIter).seq
-			if seq == 0 {
-				iter.Release()
-				continue
-			}
-			writei := int(seq/(n*2) - 1)
-			var k int
-			for ok := iter.Last(); ok; ok = iter.Prev() {
-				k++
-			}
-			if err := iter.Error(); err != nil {
-				t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
-			}
-			iter.Release()
-			if m := (writei+1)*n + n; k != m {
-				t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m)
-			}
-		}
-	}()
-
-	wg.Wait()
-}
-
-func TestDB_TransientError(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{
-		WriteBuffer:              128 * opt.KiB,
-		OpenFilesCacheCapacity:   3,
-		DisableCompactionBackoff: true,
-	})
-	defer h.close()
-
-	const (
-		nSnap = 20
-		nKey  = 10000
-	)
-
-	var (
-		snaps [nSnap]*Snapshot
-		b     = &Batch{}
-	)
-	for i := range snaps {
-		vtail := fmt.Sprintf("VAL%030d", i)
-		b.Reset()
-		for k := 0; k < nKey; k++ {
-			key := fmt.Sprintf("KEY%8d", k)
-			b.Put([]byte(key), []byte(key+vtail))
-		}
-		h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
-		if err := h.db.Write(b, nil); err != nil {
-			t.Logf("WRITE #%d error: %v", i, err)
-			h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite)
-			for {
-				if err := h.db.Write(b, nil); err == nil {
-					break
-				} else if errors.IsCorrupted(err) {
-					t.Fatalf("WRITE #%d corrupted: %v", i, err)
-				}
-			}
-		}
-
-		snaps[i] = h.db.newSnapshot()
-		b.Reset()
-		for k := 0; k < nKey; k++ {
-			key := fmt.Sprintf("KEY%8d", k)
-			b.Delete([]byte(key))
-		}
-		h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
-		if err := h.db.Write(b, nil); err != nil {
-			t.Logf("WRITE #%d error: %v", i, err)
-			h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
-			for {
-				if err := h.db.Write(b, nil); err == nil {
-					break
-				} else if errors.IsCorrupted(err) {
-					t.Fatalf("WRITE #%d corrupted: %v", i, err)
-				}
-			}
-		}
-	}
-	h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
-
-	runtime.GOMAXPROCS(runtime.NumCPU())
-
-	rnd := rand.New(rand.NewSource(0xecafdaed))
-	wg := &sync.WaitGroup{}
-	for i, snap := range snaps {
-		wg.Add(2)
-
-		go func(i int, snap *Snapshot, sk []int) {
-			defer wg.Done()
-
-			vtail := fmt.Sprintf("VAL%030d", i)
-			for _, k := range sk {
-				key := fmt.Sprintf("KEY%8d", k)
-				xvalue, err := snap.Get([]byte(key), nil)
-				if err != nil {
-					t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
-				}
-				value := key + vtail
-				if !bytes.Equal([]byte(value), xvalue) {
-					t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
-				}
-			}
-		}(i, snap, rnd.Perm(nKey))
-
-		go func(i int, snap *Snapshot) {
-			defer wg.Done()
-
-			vtail := fmt.Sprintf("VAL%030d", i)
-			iter := snap.NewIterator(nil, nil)
-			defer iter.Release()
-			for k := 0; k < nKey; k++ {
-				if !iter.Next() {
-					if err := iter.Error(); err != nil {
-						t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err)
-					} else {
-						t.Fatalf("READER_ITER #%d K%d eoi", i, k)
-					}
-				}
-				key := fmt.Sprintf("KEY%8d", k)
-				xkey := iter.Key()
-				if !bytes.Equal([]byte(key), xkey) {
-					t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey)
-				}
-				value := key + vtail
-				xvalue := iter.Value()
-				if !bytes.Equal([]byte(value), xvalue) {
-					t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue)
-				}
-			}
-		}(i, snap)
-	}
-
-	wg.Wait()
-}
-
-func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{
-		WriteBuffer:                 112 * opt.KiB,
-		CompactionTableSize:         90 * opt.KiB,
-		CompactionExpandLimitFactor: 1,
-	})
-	defer h.close()
-
-	const (
-		nSnap = 190
-		nKey  = 140
-	)
-
-	var (
-		snaps [nSnap]*Snapshot
-		b     = &Batch{}
-	)
-	for i := range snaps {
-		vtail := fmt.Sprintf("VAL%030d", i)
-		b.Reset()
-		for k := 0; k < nKey; k++ {
-			key := fmt.Sprintf("KEY%08d", k)
-			b.Put([]byte(key), []byte(key+vtail))
-		}
-		if err := h.db.Write(b, nil); err != nil {
-			t.Fatalf("WRITE #%d error: %v", i, err)
-		}
-
-		snaps[i] = h.db.newSnapshot()
-		b.Reset()
-		for k := 0; k < nKey; k++ {
-			key := fmt.Sprintf("KEY%08d", k)
-			b.Delete([]byte(key))
-		}
-		if err := h.db.Write(b, nil); err != nil {
-			t.Fatalf("WRITE #%d error: %v", i, err)
-		}
-	}
-
-	h.compactMem()
-
-	h.waitCompaction()
-	for level, tables := range h.db.s.stVersion.tables {
-		for _, table := range tables {
-			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
-		}
-	}
-
-	h.compactRangeAt(0, "", "")
-	h.waitCompaction()
-	for level, tables := range h.db.s.stVersion.tables {
-		for _, table := range tables {
-			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
-		}
-	}
-	h.compactRangeAt(1, "", "")
-	h.waitCompaction()
-	for level, tables := range h.db.s.stVersion.tables {
-		for _, table := range tables {
-			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
-		}
-	}
-	runtime.GOMAXPROCS(runtime.NumCPU())
-
-	wg := &sync.WaitGroup{}
-	for i, snap := range snaps {
-		wg.Add(1)
-
-		go func(i int, snap *Snapshot) {
-			defer wg.Done()
-
-			vtail := fmt.Sprintf("VAL%030d", i)
-			for k := 0; k < nKey; k++ {
-				key := fmt.Sprintf("KEY%08d", k)
-				xvalue, err := snap.Get([]byte(key), nil)
-				if err != nil {
-					t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
-				}
-				value := key + vtail
-				if !bytes.Equal([]byte(value), xvalue) {
-					t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
-				}
-			}
-		}(i, snap)
-	}
-
-	wg.Wait()
-}
-
-func TestDB_TableCompactionBuilder(t *testing.T) {
-	stor := newTestStorage(t)
-	defer stor.Close()
-
-	const nSeq = 99
-
-	o := &opt.Options{
-		WriteBuffer:                 112 * opt.KiB,
-		CompactionTableSize:         43 * opt.KiB,
-		CompactionExpandLimitFactor: 1,
-		CompactionGPOverlapsFactor:  1,
-		DisableBlockCache:           true,
-	}
-	s, err := newSession(stor, o)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := s.create(); err != nil {
-		t.Fatal(err)
-	}
-	defer s.close()
-	var (
-		seq        uint64
-		targetSize = 5 * o.CompactionTableSize
-		value      = bytes.Repeat([]byte{'0'}, 100)
-	)
-	for i := 0; i < 2; i++ {
-		tw, err := s.tops.create()
-		if err != nil {
-			t.Fatal(err)
-		}
-		for k := 0; tw.tw.BytesLen() < targetSize; k++ {
-			key := []byte(fmt.Sprintf("%09d", k))
-			seq += nSeq - 1
-			for x := uint64(0); x < nSeq; x++ {
-				if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil {
-					t.Fatal(err)
-				}
-			}
-		}
-		tf, err := tw.finish()
-		if err != nil {
-			t.Fatal(err)
-		}
-		rec := &sessionRecord{}
-		rec.addTableFile(i, tf)
-		if err := s.commit(rec); err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	// Build grandparent.
-	v := s.version()
-	c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
-	rec := &sessionRecord{}
-	b := &tableCompactionBuilder{
-		s:         s,
-		c:         c,
-		rec:       rec,
-		stat1:     new(cStatsStaging),
-		minSeq:    0,
-		strict:    true,
-		tableSize: o.CompactionTableSize/3 + 961,
-	}
-	if err := b.run(new(compactionTransactCounter)); err != nil {
-		t.Fatal(err)
-	}
-	for _, t := range c.tables[0] {
-		rec.delTable(c.level, t.file.Num())
-	}
-	if err := s.commit(rec); err != nil {
-		t.Fatal(err)
-	}
-	c.release()
-
-	// Build level-1.
-	v = s.version()
-	c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
-	rec = &sessionRecord{}
-	b = &tableCompactionBuilder{
-		s:         s,
-		c:         c,
-		rec:       rec,
-		stat1:     new(cStatsStaging),
-		minSeq:    0,
-		strict:    true,
-		tableSize: o.CompactionTableSize,
-	}
-	if err := b.run(new(compactionTransactCounter)); err != nil {
-		t.Fatal(err)
-	}
-	for _, t := range c.tables[0] {
-		rec.delTable(c.level, t.file.Num())
-	}
-	// Move grandparent to level-3
-	for _, t := range v.tables[2] {
-		rec.delTable(2, t.file.Num())
-		rec.addTableFile(3, t)
-	}
-	if err := s.commit(rec); err != nil {
-		t.Fatal(err)
-	}
-	c.release()
-
-	v = s.version()
-	for level, want := range []bool{false, true, false, true, false} {
-		got := len(v.tables[level]) > 0
-		if want != got {
-			t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got)
-		}
-	}
-	for i, f := range v.tables[1][:len(v.tables[1])-1] {
-		nf := v.tables[1][i+1]
-		if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) {
-			t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num())
-		}
-	}
-	v.release()
-
-	// Compaction with transient error.
-	v = s.version()
-	c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
-	rec = &sessionRecord{}
-	b = &tableCompactionBuilder{
-		s:         s,
-		c:         c,
-		rec:       rec,
-		stat1:     new(cStatsStaging),
-		minSeq:    0,
-		strict:    true,
-		tableSize: o.CompactionTableSize,
-	}
-	stor.SetEmuErrOnce(storage.TypeTable, tsOpSync)
-	stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite)
-	stor.SetEmuRandErrProb(0xf0)
-	for {
-		if err := b.run(new(compactionTransactCounter)); err != nil {
-			t.Logf("(expected) b.run: %v", err)
-		} else {
-			break
-		}
-	}
-	if err := s.commit(rec); err != nil {
-		t.Fatal(err)
-	}
-	c.release()
-
-	stor.SetEmuErrOnce(0, tsOpSync)
-	stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite)
-
-	v = s.version()
-	if len(v.tables[1]) != len(v.tables[2]) {
-		t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2]))
-	}
-	for i, f0 := range v.tables[1] {
-		f1 := v.tables[2][i]
-		iter0 := s.tops.newIterator(f0, nil, nil)
-		iter1 := s.tops.newIterator(f1, nil, nil)
-		for j := 0; true; j++ {
-			next0 := iter0.Next()
-			next1 := iter1.Next()
-			if next0 != next1 {
-				t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1)
-			}
-			key0 := iter0.Key()
-			key1 := iter1.Key()
-			if !bytes.Equal(key0, key1) {
-				t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1)
-			}
-			if next0 == false {
-				break
-			}
-		}
-		iter0.Release()
-		iter1.Release()
-	}
-	v.release()
-}
-
-func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
-	const (
-		vSize = 200 * opt.KiB
-		tSize = 100 * opt.MiB
-		mIter = 100
-		n     = tSize / vSize
-	)
-
-	h := newDbHarnessWopt(t, &opt.Options{
-		Compression:       opt.NoCompression,
-		DisableBlockCache: true,
-	})
-	defer h.close()
-
-	key := func(x int) string {
-		return fmt.Sprintf("v%06d", x)
-	}
-
-	// Fill.
-	value := strings.Repeat("x", vSize)
-	for i := 0; i < n; i++ {
-		h.put(key(i), value)
-	}
-	h.compactMem()
-
-	// Delete all.
-	for i := 0; i < n; i++ {
-		h.delete(key(i))
-	}
-	h.compactMem()
-
-	var (
-		limit = n / limitDiv
-
-		startKey = key(0)
-		limitKey = key(limit)
-		maxKey   = key(n)
-		slice    = &util.Range{Limit: []byte(limitKey)}
-
-		initialSize0 = h.sizeOf(startKey, limitKey)
-		initialSize1 = h.sizeOf(limitKey, maxKey)
-	)
-
-	t.Logf("initial size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
-
-	for r := 0; true; r++ {
-		if r >= mIter {
-			t.Fatal("taking too long to compact")
-		}
-
-		// Iterates.
-		iter := h.db.NewIterator(slice, h.ro)
-		for iter.Next() {
-		}
-		if err := iter.Error(); err != nil {
-			t.Fatalf("Iter err: %v", err)
-		}
-		iter.Release()
-
-		// Wait compaction.
-		h.waitCompaction()
-
-		// Check size.
-		size0 := h.sizeOf(startKey, limitKey)
-		size1 := h.sizeOf(limitKey, maxKey)
-		t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
-		if size0 < initialSize0/10 {
-			break
-		}
-	}
-
-	if initialSize1 > 0 {
-		h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
-	}
-}
-
-func TestDB_IterTriggeredCompaction(t *testing.T) {
-	testDB_IterTriggeredCompaction(t, 1)
-}
-
-func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
-	testDB_IterTriggeredCompaction(t, 2)
-}
-
-func TestDB_ReadOnly(t *testing.T) {
-	h := newDbHarness(t)
-	defer h.close()
-
-	h.put("foo", "v1")
-	h.put("bar", "v2")
-	h.compactMem()
-
-	h.put("xfoo", "v1")
-	h.put("xbar", "v2")
-
-	t.Log("Trigger read-only")
-	if err := h.db.SetReadOnly(); err != nil {
-		h.close()
-		t.Fatalf("SetReadOnly error: %v", err)
-	}
-
-	h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)
-
-	ro := func(key, value, wantValue string) {
-		if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
-			t.Fatalf("unexpected error: %v", err)
-		}
-		h.getVal(key, wantValue)
-	}
-
-	ro("foo", "vx", "v1")
-
-	h.o.ReadOnly = true
-	h.reopenDB()
-
-	ro("foo", "vx", "v1")
-	ro("bar", "vx", "v2")
-	h.assertNumKeys(4)
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
deleted file mode 100644
index b328ece..0000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
-	"github.com/syndtr/goleveldb/leveldb/opt"
-	"github.com/syndtr/goleveldb/leveldb/testutil"
-)
-
-var _ = testutil.Defer(func() {
-	Describe("Leveldb external", func() {
-		o := &opt.Options{
-			DisableBlockCache:      true,
-			BlockRestartInterval:   5,
-			BlockSize:              80,
-			Compression:            opt.NoCompression,
-			OpenFilesCacheCapacity: -1,
-			Strict:                 opt.StrictAll,
-			WriteBuffer:            1000,
-			CompactionTableSize:    2000,
-		}
-
-		Describe("write test", func() {
-			It("should do write correctly", func(done Done) {
-				db := newTestingDB(o, nil, nil)
-				t := testutil.DBTesting{
-					DB:      db,
-					Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(),
-				}
-				testutil.DoDBTesting(&t)
-				db.TestClose()
-				done <- true
-			}, 20.0)
-		})
-
-		Describe("read test", func() {
-			testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
-				// Building the DB.
- db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - - return db - }, func(db testutil.DB) { - db.(*testingDB).TestClose() - }) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index 1fb56f0..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d mediocre", good, mediocre) - if mediocre > good/5 { - t.Error("mediocre false positive rate is more than expected") - } -} diff --git 
a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index 1ed6d07..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterate and seek correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index 72a7978..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/onsi/ginkgo" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterate and seek correctly", func(done Done) { - // Build key/value. - index := make(keyValueIndex, len(n)) - sum := 0 - for _, x := range n { - sum += x - } - kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) - for i, j := 0, 0; i < len(n); i++ { - for x := n[i]; x > 0; x-- { - key, value := kv.Index(j) - index[i].key = key - index[i].Put(key, value) - j++ - } - } - - // Test the iterator.
- t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewIndexedIterator(NewArrayIndexer(index), true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with 100 keys", Test(100)) - Describe("with 50-50 keys", Test(50, 50)) - Describe("with 50-1 keys", Test(50, 1)) - Describe("with 50-1-50 keys", Test(50, 1, 50)) - Describe("with 1-50 keys", Test(1, 50)) - Describe("with random N-keys", Test()) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index 5ef8d5b..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package iterator_test - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestIterator(t *testing.T) { - testutil.RunSuite(t, "Iterator Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go deleted file mode 100644 index e523b63..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Merged iterator", func() { - Test := func(filled int, empty int) func() { - return func() { - It("Should iterate and seek correctly", func(done Done) { - rnd := testutil.NewRand() - - // Build key/value. - filledKV := make([]testutil.KeyValue, filled) - kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4) - kv.Iterate(func(i int, key, value []byte) { - filledKV[rnd.Intn(filled)].Put(key, value) - }) - - // Create iterators. - iters := make([]Iterator, filled+empty) - for i := range iters { - if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { - filled-- - Expect(filledKV[filled].Len()).ShouldNot(BeZero()) - iters[i] = NewArrayIterator(filledKV[filled]) - } else { - empty-- - iters[i] = NewEmptyIterator(nil) - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with three, all filled iterators", Test(3, 0)) - Describe("with one filled, one empty iterators", Test(1, 1)) - Describe("with one filled, two empty iterators", Test(1, 2)) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go deleted file mode 100644 index 0fcf225..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 -// License, author, and contributor information can be found at the URLs below, respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -package journal - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "strings" - "testing" -) - -type dropper struct { - t *testing.T -} - -func (d dropper) Drop(err error) { - d.t.Log(err) -} - -func short(s string) string { - if len(s) < 64 { - return s - } - return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) -} - -// big returns a string of length n, composed of repetitions of partial. -func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t *testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0.
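(An annotation on the deleted test above, not part of the patch: the size assertions that follow in TestFlush all derive from the journal's chunk framing, a 7-byte chunk header — 4-byte checksum, 2-byte length, 1-byte chunk type — inside 32 KiB blocks. A minimal standalone sketch of that arithmetic, with headerSize assumed to match the constant this test relies on:)

package main

import "fmt"

// Assumed journal framing, matching the numbers asserted in TestFlush:
// each chunk costs a 7-byte header (checksum 4 + length 2 + type 1), and
// a record is split into extra chunks at 32 KiB block boundaries.
const headerSize = 7

// oneChunkLen is the flushed size of records written as one chunk each.
func oneChunkLen(payloads ...int) int {
	n := 0
	for _, p := range payloads {
		n += headerSize + p
	}
	return n
}

func main() {
	fmt.Println(oneChunkLen(1, 2))        // 17 = 2*7 + 1 + 2
	fmt.Println(oneChunkLen(1, 2, 10000)) // 10024 = 17 + 7 + 10000
	// The 40000-byte record crosses a block boundary, so it is split into
	// two chunks and pays one extra header: 10024 + 2*7 + 40000 = 50038.
	fmt.Println(oneChunkLen(1, 2, 10000, 40000) + headerSize) // 50038
}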
- w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. - r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != 
'1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). 
- rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. 
- ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index 572ae81..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" -) - -type ErrIkeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrIkeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type kType int - -func (kt kType) String() string { - switch kt { - case ktDel: - return "d" - case ktVal: - return "v" - } - return "x" -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. 
-const ( - ktDel kType = iota - ktVal -) - -// ktSeek defines the kType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const ktSeek = ktVal - -const ( - // Maximum value possible for a sequence number; the low 8 bits are - // used by the value type, so both can be packed together into a - // single 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek) -) - -// Maximum number encoded in bytes. -var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > ktVal { - panic("leveldb: invalid type") - } - - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik -} - -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) - return err == nil -} - -func (ik iKey) assert() { - if ik == nil { - panic("leveldb: nil iKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik iKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik iKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik iKey) parseNum() (seq uint64, kt kType) { - num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik iKey) String() string { - if ik == nil { - return "<nil>" - } - - if ukey, seq, kt, err := parseIkey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } else { - return "<invalid>" - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 30eadf7..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
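(A worked example of the internal-key layout defined in key.go above — a standalone sketch, not part of the deleted file: an iKey is the user key followed by the little-endian uint64 packing seq<<8 | kind, exactly as newIkey and parseIkey do.)

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	ukey := []byte("foo")
	seq := uint64(100)
	kind := uint64(1) // ktVal in key.go above

	// Encode, mirroring newIkey: ukey || uint64(seq<<8 | kind).
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], seq<<8|kind)

	// Decode, mirroring parseIkey.
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	fmt.Printf("ukey=%q seq=%d kind=%d\n", ik[:len(ik)-8], num>>8, num&0xff)
	// Output: ukey="foo" seq=100 kind=1
}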
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, kt kType) iKey { - return newIkey([]byte(key), uint64(seq), kt) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, kt kType) { - ik := ikey(key, seq, kt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - rseq, rt := ik.parseNum() - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - - if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil { - if !bytes.Equal(rukey, []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - } else { - t.Errorf("key error: %v", kerr) - } -} - -func TestIkey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, ktVal) - testSingleKey(t, "hello", 1, ktDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestIkeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 99, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 101, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("bar", 99, ktVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSep(ikey("foo", 100, ktVal), - ikey("hello", 200, ktVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foobar", 200, ktVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, ktVal), - shortSep(ikey("foobar", 100, ktVal), - ikey("foo", 200, ktVal))) -} - -func TestIkeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSuccessor(ikey("foo", 100, ktVal))) - assertBytes(t, ikey("\xff\xff", 100, ktVal), - shortSuccessor(ikey("\xff\xff", 100, ktVal))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go 
b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index fefa007..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package leveldb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLevelDB(t *testing.T) { - testutil.RunSuite(t, "LevelDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index b05084c..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index 18c304b..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package memdb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemDB(t *testing.T) { - testutil.RunSuite(t, "MemDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index 5dd6dbc..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil -} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. - rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. 
- rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }, nil, nil) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go deleted file mode 100644 index 33c1487..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func decodeEncode(v *sessionRecord) (res bool, err error) { - b := new(bytes.Buffer) - err = v.encode(b) - if err != nil { - return - } - v2 := &sessionRecord{} - err = v2.decode(b, opt.DefaultNumLevel) - if err != nil { - return - } - b2 := new(bytes.Buffer) - err = v2.encode(b2) - if err != nil { - return - } - return bytes.Equal(b.Bytes(), b2.Bytes()), nil -} - -func TestSessionRecord_EncodeDecode(t *testing.T) { - big := uint64(1) << 50 - v := &sessionRecord{} - i := uint64(0) - test := func() { - res, err := decodeEncode(v) - if err != nil { - t.Fatalf("error when testing encode/decode sessionRecord: %v", err) - } - if !res { - t.Error("encode/decode test failed at iteration:", i) - } - } - - for ; i < 4; i++ { - test() - v.addTable(3, big+300+i, big+400+i, - newIkey([]byte("foo"), big+500+1, ktVal), - newIkey([]byte("zoo"), big+600+1, ktDel)) - v.delTable(4, big+700+i) - v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal)) - } - - v.setComparer("foo") - v.setJournalNum(big + 100) - v.setPrevJournalNum(big + 99) - v.setNextFileNum(big + 200) - v.setSeqNum(big + 1000) - test() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 420b277..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -var errFileOpen = errors.New("leveldb/storage: file still open") - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil - } - return -} - -// fileStorage is a file-system backed storage.
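(Before the implementation, a minimal usage sketch of the storage API deleted here — OpenFile, Lock, and Close all appear in the code below; the path is illustrative and error handling is reduced to panics:)

package main

import "github.com/syndtr/goleveldb/leveldb/storage"

func main() {
	// OpenFile creates the directory if needed and takes the LOCK file,
	// so a second OpenFile on the same path fails while this one is open.
	stor, err := storage.OpenFile("/tmp/example-db")
	if err != nil {
		panic(err)
	}
	defer stor.Close()

	// Lock acquires the session lock; a second Lock returns ErrLocked
	// until Release is called.
	l, err := stor.Lock()
	if err != nil {
		panic(err)
	}
	defer l.Release()
}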
-type fileStorage struct { - path string - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte - // Opened file counter; a value < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesystem-backed storage implementation with the given -// path. This also holds a file lock, so any subsequent attempt to open the same -// path will fail. -// -// The storage must be closed after use, by calling the Close method. -func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK")) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - fs := &fileStorage{path: path, flock: flock, logw: logw} - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (util.Releaser, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) -} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) -} - -func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) -} - -func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) -} - -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} - -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error.
- if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return -} - -func (fs *fileStorage) GetManifest() (f File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if fn[7] != '.' || len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) - continue - } - if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) - continue - } - } - path := filepath.Join(fs.path, fn) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return nil, e1 - } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return nil, e1 - } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) - if pend1 { - rem = append(rem, fn) - } - if !pend1 || cerr == nil { - cerr = &ErrCorrupted{ - File: fsParseName(filepath.Base(fn)), - Err: errors.New("leveldb/storage: corrupted or incomplete manifest file"), - } - } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) - if pend1 { - rem = append(rem, fn) - } - } else { - f = f1 - pend = pend1 - } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) - } - } - } - // Don't remove any files if there is no valid CURRENT file. - if f == nil { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist - } - return - } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) - } - } - // Remove obsolete or incomplete pending CURRENT files. - for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) - } - } - return -} - -func (fs *fileStorage) SetManifest(f File) (err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile - } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. 
- if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) - } - if err != nil { - return err - } - return rename(path, filepath.Join(fs.path, "CURRENT")) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) - } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 - } - return err -} - -type fileWrap struct { - *os.File - f *file -} - -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. - if err := syncDir(fw.f.fs.path); err != nil { - return err - } - } - return nil -} - -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed - } - f.open = false - f.fs.open-- - err := fw.File.Close() - if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) - } - return err -} - -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) - if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile - } - if f.open || newfile2.open { - return errFileOpen - } - return rename(newfile2.path(), f.path()) -} - -func (f *file) Type() FileType { - return f.t -} - -func (f *file) Num() uint64 { - return f.num -} - -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) - if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove file with old name, just in case. 
- if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } - } - return err -} - -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) - case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) - default: - panic("invalid file type") - } -} - -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) -} - -func fsParseName(name string) *FileInfo { - fi := &FileInfo{} - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &fi.Num, &tail) - if err == nil { - switch tail { - case "log": - fi.Type = TypeJournal - case "ldb", "sst": - fi.Type = TypeTable - case "tmp": - fi.Type = TypeTemp - default: - return nil - } - return fi - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fi.Num, &tail) - if n == 1 { - fi.Type = TypeManifest - return fi - } - return nil -} - -func (f *file) parse(name string) bool { - fi := fsParseName(name) - if fi == nil { - return false - } - f.t = fi.Type - f.num = fi.Num - return true -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 92abcbb..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num uint64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) - } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - f := new(file) - if f.parse(name) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } - } - - p1, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - defer os.RemoveAll(path) - - p2, err := OpenFile(path) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index fc1c816..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "bytes" - "os" - "sync" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 3 - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Release() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - manifest *memFilePtr -} - -// NewMemStorage returns a new memory-backed storage implementation. 
-func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (util.Releaser, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) GetFile(num uint64, t FileType) File { - return &memFilePtr{ms: ms, num: num, t: t} -} - -func (ms *memStorage) GetFiles(t FileType) ([]File, error) { - ms.mu.Lock() - var ff []File - for x, _ := range ms.files { - num, mt := x>>typeShift, FileType(x)&TypeAll - if mt&t == 0 { - continue - } - ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) - } - ms.mu.Unlock() - return ff, nil -} - -func (ms *memStorage) GetManifest() (File, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.manifest == nil { - return nil, os.ErrNotExist - } - return ms.manifest, nil -} - -func (ms *memStorage) SetManifest(f File) error { - fm, ok := f.(*memFilePtr) - if !ok || fm.t != TypeManifest { - return ErrInvalidFile - } - ms.mu.Lock() - ms.manifest = fm - ms.mu.Unlock() - return nil -} - -func (*memStorage) Close() error { return nil } - -type memReader struct { - *bytes.Reader - m *memFile -} - -func (mr *memReader) Close() error { - return mr.m.Close() -} - -type memFile struct { - bytes.Buffer - ms *memStorage - open bool -} - -func (*memFile) Sync() error { return nil } -func (m *memFile) Close() error { - m.ms.mu.Lock() - m.open = false - m.ms.mu.Unlock() - return nil -} - -type memFilePtr struct { - ms *memStorage - num uint64 - t FileType -} - -func (p *memFilePtr) x() uint64 { - return p.Num()< -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "bytes" - "testing" -) - -func TestMemStorage(t *testing.T) { - m := NewMemStorage() - - l, err := m.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = m.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = m.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } - - f := m.GetFile(1, TypeTable) - if f.Num() != 1 && f.Type() != TypeTable { - t.Fatal("invalid file number and type") - } - w, _ := f.Create() - w.Write([]byte("abc")) - w.Close() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 { - t.Fatal("invalid GetFiles len") - } - buf := new(bytes.Buffer) - r, err := f.Open() - if err != nil { - t.Fatal("Open: got error: ", err) - } - buf.ReadFrom(r) - r.Close() - if got := buf.String(); got != "abc" { - t.Fatalf("Read: invalid value, want=abc got=%s", got) - } - if _, err := f.Open(); err != nil { - t.Fatal("Open: got error: ", err) - } - if _, err := m.GetFile(1, TypeTable).Open(); err == nil { - t.Fatal("expecting error") - } - f.Remove() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 { - t.Fatal("invalid GetFiles len", len(ff)) - } - if _, err := f.Open(); err == nil { - t.Fatal("expecting error") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go deleted file mode 100644 index 08be0ba..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "os"
- "path/filepath"
- "sync"
- "testing"
-
- "github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-const typeShift = 4
-
-var (
- tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument")
- tsErrFileOpen = errors.New("leveldb.testStorage: file still open")
-)
-
-var (
- tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
- tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
- tsKeepFS = tsFSEnv == "2"
- tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
- tsMU = &sync.Mutex{}
- tsNum = 0
-)
-
-type tsOp uint
-
-const (
- tsOpOpen tsOp = iota
- tsOpCreate
- tsOpReplace
- tsOpRemove
- tsOpRead
- tsOpReadAt
- tsOpWrite
- tsOpSync
-
- tsOpNum
-)
-
-type tsLock struct {
- ts *testStorage
- r util.Releaser
-}
-
-func (l tsLock) Release() {
- l.r.Release()
- l.ts.t.Log("I: storage lock released")
-}
-
-type tsReader struct {
- tf tsFile
- storage.Reader
-}
-
-func (tr tsReader) Read(b []byte) (n int, err error) {
- ts := tr.tf.ts
- ts.countRead(tr.tf.Type())
- if tr.tf.shouldErrLocked(tsOpRead) {
- return 0, errors.New("leveldb.testStorage: emulated read error")
- }
- n, err = tr.Reader.Read(b)
- if err != nil && err != io.EOF {
- ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
- }
- return
-}
-
-func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
- ts := tr.tf.ts
- ts.countRead(tr.tf.Type())
- if tr.tf.shouldErrLocked(tsOpReadAt) {
- return 0, errors.New("leveldb.testStorage: emulated readAt error")
- }
- n, err = tr.Reader.ReadAt(b, off)
- if err != nil && err != io.EOF {
- ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
- }
- return
-}
-
-func (tr tsReader) Close() (err error) {
- err = tr.Reader.Close()
- tr.tf.close("reader", err)
- return
-}
-
-type tsWriter struct {
- tf tsFile
- storage.Writer
-}
-
-func (tw tsWriter) Write(b []byte) (n int, err error) {
- if tw.tf.shouldErrLocked(tsOpWrite) {
- return 0, errors.New("leveldb.testStorage: emulated write error")
- }
- n, err = tw.Writer.Write(b)
- if err != nil {
- tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
- }
- return
-}
-
-func (tw tsWriter) Sync() (err error) {
- ts := tw.tf.ts
- ts.mu.Lock()
- for ts.emuDelaySync&tw.tf.Type() != 0 {
- ts.cond.Wait()
- }
- ts.mu.Unlock()
- if tw.tf.shouldErrLocked(tsOpSync) {
- return errors.New("leveldb.testStorage: emulated sync error")
- }
- err = tw.Writer.Sync()
- if err != nil {
- tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
- }
- return
-}
-
-func (tw tsWriter) Close() (err error) {
- err = tw.Writer.Close()
- tw.tf.close("writer", err)
- return
-}
-
-type tsFile struct {
- ts *testStorage
- storage.File
-}
-
-func (tf tsFile) x() uint64 {
- return tf.Num()<<typeShift | uint64(tf.Type())
-}
- for x, writer := range ts.opens {
- num, tt := x>>typeShift, storage.FileType(x)&storage.TypeAll
- ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer)
- }
- }
- ts.mu.Unlock()
-}
-
-func newTestStorage(t *testing.T) *testStorage {
- var stor storage.Storage
- var closeFn func() error
- if tsFS {
- for {
- tsMU.Lock()
- num := tsNum
- tsNum++
- tsMU.Unlock()
- tempdir := tsTempdir
- if tempdir == "" {
- tempdir = os.TempDir()
- }
- path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
- if _, err
:= os.Stat(path); err != nil { - stor, err = storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot create storage: %v", err) - } - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s ----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if t.Failed() { - t.Logf("testing failed, test DB preserved at %s", path) - return nil - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - } - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - emuErrOnceMap: make(map[uint64]uint), - emuRandErrProb: 0x999, - emuRandRand: rand.New(rand.NewSource(0xfacedead)), - } - ts.cond.L = &ts.mu - return ts -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 00e6f9e..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type blockTesting struct { - tr *Reader - b *block -} - -func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.tr.newBlockIter(t.b, nil, slice, false) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &blockTesting{ - tr: &Reader{cmp: comparer.DefaultComparer}, - b: &block{ - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - }, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. 
- testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. - bt := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := bt.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - iter.Release() - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index 6465da6..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package table - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunSuite(t, "Table Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 4b59b31..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, false, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. - tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - Expect(err).ShouldNot(HaveOccurred()) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. - tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, *kv, db, nil, nil) - } - } - - testutil.AllKeyValueTesting(nil, Build, nil, nil) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - indexBlock, err := r.readBlock(r.indexBH, true) - Expect(err).To(BeNil()) - Expect(indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index ec3f177..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type Has interface { - TestHas(key []byte) (ret bool, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), 
t.Text()) - t.TestDeletedKey(key) - t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - iter.Release() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go deleted file mode 100644 index 82f3d0e..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func RunSuite(t GinkgoTestingT, name string) { - RunDefer() - - SynchronizedBeforeSuite(func() []byte { - RunDefer("setup") - return nil - }, func(data []byte) {}) - SynchronizedAfterSuite(func() { - RunDefer("teardown") - }, func() {}) - - RegisterFailHandler(Fail) - RunSpecs(t, name) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index df6d9db..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := BytesSeparator(key0, key1) - oldKey, _ := 
t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() - t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index 471d570..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) - kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { - ShuffledIndex(rnd, kv.Len(), 
1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", "v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - if rnd == nil { - rnd = NewRand() - } - if 
maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - 1) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + 1 - for j += 1; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index a0b58f0..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { - if rnd == nil { - rnd = NewRand() - } - - if p == nil { - BeforeEach(func() { - p = setup(kv) - }) - if teardown != nil { - AfterEach(func() { - teardown(p) - }) - } - } - - It("Should find all keys with Find", func() { - if db, ok := p.(Find); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) - } - }) - - It("Should return error if the key is not present", func() { - if db, ok := p.(Find); ok { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - - It("Should only find exact key with Get", func() { - if db, ok := p.(Get); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. 
- if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - } - }) - - It("Should only find present key with Has", func() { - if db, ok := p.(Has); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) - } - }) - - TestIter := func(r *util.Range, _kv KeyValue) { - if db, ok := p.(NewIterator); ok { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() - } - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) - done <- true - }, 3.0) - - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) - } - }) - - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) -} - -func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { - Test := func(kv *KeyValue) func() { - return func() { - var p DB - if setup != nil { - Defer("setup", func() { - p = setup(*kv) - }) - } - if teardown != nil { - Defer("teardown", func() { - teardown(p) - }) - } - if body != nil { - p = body(*kv) - } - KeyValueTesting(rnd, *kv, p, func(KeyValue) DB { - return p - }, nil) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) - Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 59c496d..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRead != 0: - add(modeRead) - case m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { - panic("overflow") - } - return num<> typeCount, storage.FileType(x) & storage.TypeAll -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - r util.Releaser -} - -func (l storageLock) Release() { - l.r.Release() - l.s.logI("storage lock released") -} - -type reader struct { - f *file - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.Read(p) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.ReadAt(p, off) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) -} - -type writer struct { - f *file - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = 
w.f.s.emulateError(ModeWrite, w.f.Type()) - if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) - n, err = w.Writer.Write(p) - } - w.f.s.count(ModeWrite, w.f.Type(), n) - if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) - } - return -} - -func (w *writer) Sync() (err error) { - err = w.f.s.emulateError(ModeSync, w.f.Type()) - if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) - err = w.Writer.Sync() - } - w.f.s.count(ModeSync, w.f.Type(), 0) - if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) - } - return -} - -func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - } - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return -} - -type Storage struct { - storage.Storage - closeFn func() error - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - 
stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" - line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - s.lb.WriteByte('\n') -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) - s.Storage.Log(str) -} - -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - r = storageLock{s, r} - } - return -} - -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) - if err != nil { - s.logI("get files failed, err=%v", err) - return - } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) - return -} - -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) - } - return - } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil -} - -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) - if err != nil { - s.logI("set manifest failed, err=%v", err) - } else { - s.logI("set manifest, num=%d", f_.Num()) - } - return err -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) - } - return out -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close func error, err=%v", err1) - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer 
s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - return s -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 97c5294..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/onsi/ginkgo/config" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) 
- delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) - if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} - -func Max(x, y int) int { - if x > y { - return x - } - return y -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index 25bf2b2..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d9673..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. -func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. 
-func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. -func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "" { - t.Errorf("expected ; got 
%q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5; i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. - var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. 
-func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. -func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. -func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go deleted file mode 100644 index 1f7fdd4..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go deleted file mode 100644 index 27b8d03..0000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go deleted file mode 100644 index 205dffa..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
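The pre-go1.3 Pool fallback deleted just above is a self-contained pattern worth a runnable demo: a buffered channel where Get never blocks (returning nil when the pool is empty) and Put silently drops values once the pool is full. The demo below reuses that code verbatim:

```go
package main

import "fmt"

// Channel-backed pool, as in the deleted pool_legacy.go.
type Pool struct{ pool chan interface{} }

func NewPool(cap int) *Pool { return &Pool{pool: make(chan interface{}, cap)} }

func (p *Pool) Get() interface{} {
	select {
	case x := <-p.pool:
		return x
	default:
		return nil // empty: never block the caller
	}
}

func (p *Pool) Put(x interface{}) {
	select {
	case p.pool <- x:
	default: // full: drop the value rather than block
	}
}

func main() {
	p := NewPool(1)
	p.Put("a")
	p.Put("b")                    // dropped: pool already full
	fmt.Println(p.Get(), p.Get()) // a <nil>
}
```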
- -package codec - -import ( - "bufio" - "bytes" - "encoding/hex" - "math" - "os" - "regexp" - "strings" - "testing" -) - -func TestCborIndefiniteLength(t *testing.T) { - oldMapType := testCborH.MapType - defer func() { - testCborH.MapType = oldMapType - }() - testCborH.MapType = testMapStrIntfTyp - // var ( - // M1 map[string][]byte - // M2 map[uint64]bool - // L1 []interface{} - // S1 []string - // B1 []byte - // ) - var v, vv interface{} - // define it (v), encode it using indefinite lengths, decode it (vv), compare v to vv - v = map[string]interface{}{ - "one-byte-key": []byte{1, 2, 3, 4, 5, 6}, - "two-string-key": "two-value", - "three-list-key": []interface{}{true, false, uint64(1), int64(-1)}, - } - var buf bytes.Buffer - // buf.Reset() - e := NewEncoder(&buf, testCborH) - buf.WriteByte(cborBdIndefiniteMap) - //---- - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode("one-") - e.MustEncode("byte-") - e.MustEncode("key") - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdIndefiniteBytes) - e.MustEncode([]byte{1, 2, 3}) - e.MustEncode([]byte{4, 5, 6}) - buf.WriteByte(cborBdBreak) - - //---- - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode("two-") - e.MustEncode("string-") - e.MustEncode("key") - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode([]byte("two-")) // encode as bytes, to check robustness of code - e.MustEncode([]byte("value")) - buf.WriteByte(cborBdBreak) - - //---- - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode("three-") - e.MustEncode("list-") - e.MustEncode("key") - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdIndefiniteArray) - e.MustEncode(true) - e.MustEncode(false) - e.MustEncode(uint64(1)) - e.MustEncode(int64(-1)) - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdBreak) // close map - - NewDecoderBytes(buf.Bytes(), testCborH).MustDecode(&vv) - if err := deepEqual(v, vv); err != nil { - logT(t, "-------- Before and After marshal do not match: Error: %v", err) - logT(t, " ....... GOLDEN: (%T) %#v", v, v) - logT(t, " ....... 
DECODED: (%T) %#v", vv, vv) - failT(t) - } -} - -type testCborGolden struct { - Base64 string `codec:"cbor"` - Hex string `codec:"hex"` - Roundtrip bool `codec:"roundtrip"` - Decoded interface{} `codec:"decoded"` - Diagnostic string `codec:"diagnostic"` - Skip bool `codec:"skip"` -} - -// Some tests are skipped because they include numbers outside the range of int64/uint64 -func doTestCborGoldens(t *testing.T) { - oldMapType := testCborH.MapType - defer func() { - testCborH.MapType = oldMapType - }() - testCborH.MapType = testMapStrIntfTyp - // decode test-cbor-goldens.json into a list of []*testCborGolden - // for each one, - // - decode hex into []byte bs - // - decode bs into interface{} v - // - compare both using deepequal - // - for any miss, record it - var gs []*testCborGolden - f, err := os.Open("test-cbor-goldens.json") - if err != nil { - logT(t, "error opening test-cbor-goldens.json: %v", err) - failT(t) - } - defer f.Close() - jh := new(JsonHandle) - jh.MapType = testMapStrIntfTyp - // d := NewDecoder(f, jh) - d := NewDecoder(bufio.NewReader(f), jh) - // err = d.Decode(&gs) - d.MustDecode(&gs) - if err != nil { - logT(t, "error json decoding test-cbor-goldens.json: %v", err) - failT(t) - } - - tagregex := regexp.MustCompile(`[\d]+\(.+?\)`) - hexregex := regexp.MustCompile(`h'([0-9a-fA-F]*)'`) - for i, g := range gs { - // fmt.Printf("%v, skip: %v, isTag: %v, %s\n", i, g.Skip, tagregex.MatchString(g.Diagnostic), g.Diagnostic) - // skip tags or simple or those with prefix, as we can't verify them. - if g.Skip || strings.HasPrefix(g.Diagnostic, "simple(") || tagregex.MatchString(g.Diagnostic) { - // fmt.Printf("%v: skipped\n", i) - logT(t, "[%v] skipping because skip=true OR unsupported simple value or Tag Value", i) - continue - } - // println("++++++++++++", i, "g.Diagnostic", g.Diagnostic) - if hexregex.MatchString(g.Diagnostic) { - // println(i, "g.Diagnostic matched hex") - if s2 := g.Diagnostic[2 : len(g.Diagnostic)-1]; s2 == "" { - g.Decoded = zeroByteSlice - } else if bs2, err2 := hex.DecodeString(s2); err2 == nil { - g.Decoded = bs2 - } - // fmt.Printf("%v: hex: %v\n", i, g.Decoded) - } - bs, err := hex.DecodeString(g.Hex) - if err != nil { - logT(t, "[%v] error hex decoding %s [%v]: %v", i, g.Hex, err) - failT(t) - } - var v interface{} - NewDecoderBytes(bs, testCborH).MustDecode(&v) - if _, ok := v.(RawExt); ok { - continue - } - // check the diagnostics to compare - switch g.Diagnostic { - case "Infinity": - b := math.IsInf(v.(float64), 1) - testCborError(t, i, math.Inf(1), v, nil, &b) - case "-Infinity": - b := math.IsInf(v.(float64), -1) - testCborError(t, i, math.Inf(-1), v, nil, &b) - case "NaN": - // println(i, "checking NaN") - b := math.IsNaN(v.(float64)) - testCborError(t, i, math.NaN(), v, nil, &b) - case "undefined": - b := v == nil - testCborError(t, i, nil, v, nil, &b) - default: - v0 := g.Decoded - // testCborCoerceJsonNumber(reflect.ValueOf(&v0)) - testCborError(t, i, v0, v, deepEqual(v0, v), nil) - } - } -} - -func testCborError(t *testing.T, i int, v0, v1 interface{}, err error, equal *bool) { - if err == nil && equal == nil { - // fmt.Printf("%v testCborError passed (err and equal nil)\n", i) - return - } - if err != nil { - logT(t, "[%v] deepEqual error: %v", i, err) - logT(t, " ....... GOLDEN: (%T) %#v", v0, v0) - logT(t, " ....... DECODED: (%T) %#v", v1, v1) - failT(t) - } - if equal != nil && !*equal { - logT(t, "[%v] values not equal", i) - logT(t, " ....... GOLDEN: (%T) %#v", v0, v0) - logT(t, " ....... 
DECODED: (%T) %#v", v1, v1) - failT(t) - } - // fmt.Printf("%v testCborError passed (checks passed)\n", i) -} - -func TestCborGoldens(t *testing.T) { - doTestCborGoldens(t) -} diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go deleted file mode 100644 index cd93556..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go +++ /dev/null @@ -1,1117 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// Test works by using a slice of interfaces. -// It can test for encoding/decoding into/from a nil interface{} -// or passing the object to encode/decode into. -// -// There are basically 2 main tests here. -// First test internally encodes and decodes things and verifies that -// the artifact was as expected. -// Second test will use python msgpack to create a bunch of golden files, -// read those files, and compare them to what it should be. It then -// writes those files back out and compares the byte streams. -// -// Taken together, the tests are pretty extensive. -// -// The following manual tests must be done: -// - TestCodecUnderlyingType -// - Set fastpathEnabled to false and run tests (to ensure that regular reflection works). -// We don't want to use a variable there so that code is ellided. - -import ( - "bytes" - "encoding/gob" - "flag" - "fmt" - "io/ioutil" - "math" - "net" - "net/rpc" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strconv" - "sync/atomic" - "testing" - "time" -) - -func init() { - testInitFlags() - testPreInitFns = append(testPreInitFns, testInit) -} - -type testVerifyArg int - -const ( - testVerifyMapTypeSame testVerifyArg = iota - testVerifyMapTypeStrIntf - testVerifyMapTypeIntfIntf - // testVerifySliceIntf - testVerifyForPython -) - -const testSkipRPCTests = false - -var ( - testVerbose bool - testInitDebug bool - testUseIoEncDec bool - testStructToArray bool - testCanonical bool - testWriteNoSymbols bool - testSkipIntf bool - - skipVerifyVal interface{} = &(struct{}{}) - - testMapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
- timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 - timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc).UTC() - timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc).UTC() - timeToCompare3 = time.Unix(0, 270).UTC() // use value that must be encoded as uint64 for nanoseconds (for cbor/msgpack comparison) - //timeToCompare4 = time.Time{}.UTC() // does not work well with simple cbor time encoding (overflow) - timeToCompare4 = time.Unix(-2013855848, 4223).UTC() - - table []interface{} // main items we encode - tableVerify []interface{} // we verify encoded things against this after decode - tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) - tablePythonVerify []interface{} // for verifying for python, since Python sometimes - // will encode a float32 as float64, or large int as uint - testRpcInt = new(TestRpcInt) -) - -func testInitFlags() { - // delete(testDecOpts.ExtFuncs, timeTyp) - flag.BoolVar(&testVerbose, "tv", false, "Test Verbose") - flag.BoolVar(&testInitDebug, "tg", false, "Test Init Debug") - flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") - flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") - flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") - flag.BoolVar(&testCanonical, "tc", false, "Set Canonical option") - flag.BoolVar(&testSkipIntf, "tf", false, "Skip Interfaces") -} - -type TestABC struct { - A, B, C string -} - -type TestRpcInt struct { - i int -} - -func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } -func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } -func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } -func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { - *res = fmt.Sprintf("%#v", arg) - return nil -} -func (r *TestRpcInt) Echo123(args []string, res *string) error { - *res = fmt.Sprintf("%#v", args) - return nil -} - -type testUnixNanoTimeExt struct{} - -func (x testUnixNanoTimeExt) WriteExt(interface{}) []byte { panic("unsupported") } -func (x testUnixNanoTimeExt) ReadExt(interface{}, []byte) { panic("unsupported") } -func (x testUnixNanoTimeExt) ConvertExt(v interface{}) interface{} { - switch v2 := v.(type) { - case time.Time: - return v2.UTC().UnixNano() - case *time.Time: - return v2.UTC().UnixNano() - default: - panic(fmt.Sprintf("unsupported format for time conversion: expecting time.Time; got %T", v)) - } -} -func (x testUnixNanoTimeExt) UpdateExt(dest interface{}, v interface{}) { - // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v\n", v) - tt := dest.(*time.Time) - switch v2 := v.(type) { - case int64: - *tt = time.Unix(0, v2).UTC() - case uint64: - *tt = time.Unix(0, int64(v2)).UTC() - //case float64: - //case string: - default: - panic(fmt.Sprintf("unsupported format for time conversion: expecting int64/uint64; got %T", v)) - } - // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v, tt: %#v\n", v, tt) -} - -func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { - //for python msgpack, - // - all positive integers are unsigned 64-bit ints - // - all floats are float64 - switch iv := v.(type) { - case int8: - if iv >= 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int16: - if iv >= 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int32: - if iv >= 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int64: - if iv >= 0 { - v2 = 
uint64(iv) - } else { - v2 = int64(iv) - } - case uint8: - v2 = uint64(iv) - case uint16: - v2 = uint64(iv) - case uint32: - v2 = uint64(iv) - case uint64: - v2 = uint64(iv) - case float32: - v2 = float64(iv) - case float64: - v2 = float64(iv) - case []interface{}: - m2 := make([]interface{}, len(iv)) - for j, vj := range iv { - m2[j] = testVerifyVal(vj, arg) - } - v2 = m2 - case map[string]bool: - switch arg { - case testVerifyMapTypeSame: - m2 := make(map[string]bool) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - } - case map[string]interface{}: - switch arg { - case testVerifyMapTypeSame: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - } - case map[interface{}]interface{}: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) - } - v2 = m2 - case time.Time: - switch arg { - case testVerifyForPython: - if iv2 := iv.UnixNano(); iv2 >= 0 { - v2 = uint64(iv2) - } else { - v2 = int64(iv2) - } - default: - v2 = v - } - default: - v2 = v - } - return -} - -func testInit() { - gob.Register(new(TestStruc)) - if testInitDebug { - ts0 := newTestStruc(2, false, !testSkipIntf, false) - fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) - } - - testJsonH.Canonical = testCanonical - testCborH.Canonical = testCanonical - testSimpleH.Canonical = testCanonical - testBincH.Canonical = testCanonical - testMsgpackH.Canonical = testCanonical - - testJsonH.StructToArray = testStructToArray - testCborH.StructToArray = testStructToArray - testSimpleH.StructToArray = testStructToArray - testBincH.StructToArray = testStructToArray - testMsgpackH.StructToArray = testStructToArray - - testMsgpackH.RawToString = true - - if testWriteNoSymbols { - testBincH.AsSymbols = AsSymbolNone - } else { - testBincH.AsSymbols = AsSymbolAll - } - - // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) - // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) - timeEncExt := func(rv reflect.Value) (bs []byte, err error) { - switch v2 := rv.Interface().(type) { - case time.Time: - bs = encodeTime(v2) - case *time.Time: - bs = encodeTime(*v2) - default: - err = fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2) - } - return - } - timeDecExt := func(rv reflect.Value, bs []byte) (err error) { - tt, err := decodeTime(bs) - if err == nil { - *(rv.Interface().(*time.Time)) = tt - } - return - } - - // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. 
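timeEncExt/timeDecExt and testUnixNanoTimeExt above all reduce a time.Time to a UTC nanosecond count on encode and rebuild it on decode. The round-trip they depend on is just:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC)
	n := t1.UnixNano()          // what ConvertExt/timeEncExt reduce the time to
	t2 := time.Unix(0, n).UTC() // what UpdateExt rebuilds on decode
	fmt.Println(t1.Equal(t2))   // true
}
```

Dropping the zone and keeping only UnixNano is what makes the later reflect.DeepEqual comparisons stable, as the timeLoc comment above notes.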
- testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - testCborH.SetExt(timeTyp, 1, &testUnixNanoTimeExt{}) - testJsonH.SetExt(timeTyp, 1, &testUnixNanoTimeExt{}) - - primitives := []interface{}{ - int8(-8), - int16(-1616), - int32(-32323232), - int64(-6464646464646464), - uint8(192), - uint16(1616), - uint32(32323232), - uint64(6464646464646464), - byte(192), - float32(-3232.0), - float64(-6464646464.0), - float32(3232.0), - float64(6464646464.0), - false, - true, - nil, - "someday", - "", - "bytestring", - timeToCompare1, - timeToCompare2, - timeToCompare3, - timeToCompare4, - } - mapsAndStrucs := []interface{}{ - map[string]bool{ - "true": true, - "false": false, - }, - map[string]interface{}{ - "true": "True", - "false": false, - "uint16(1616)": uint16(1616), - }, - //add a complex combo map in here. (map has list which has map) - //note that after the first thing, everything else should be generic. - map[string]interface{}{ - "list": []interface{}{ - int16(1616), - int32(32323232), - true, - float32(-3232.0), - map[string]interface{}{ - "TRUE": true, - "FALSE": false, - }, - []interface{}{true, false}, - }, - "int32": int32(32323232), - "bool": true, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890", - }, - map[interface{}]interface{}{ - true: "true", - uint8(138): false, - "false": uint8(200), - }, - newTestStruc(0, false, !testSkipIntf, false), - } - - table = []interface{}{} - table = append(table, primitives...) //0-19 are primitives - table = append(table, primitives) //20 is a list of primitives - table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct - - tableVerify = make([]interface{}, len(table)) - tableTestNilVerify = make([]interface{}, len(table)) - tablePythonVerify = make([]interface{}, len(table)) - - lp := len(primitives) - av := tableVerify - for i, v := range table { - if i == lp+3 { - av[i] = skipVerifyVal - continue - } - //av[i] = testVerifyVal(v, testVerifyMapTypeSame) - switch v.(type) { - case []interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[string]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[interface{}]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - default: - av[i] = v - } - } - - av = tableTestNilVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf) - } - - av = tablePythonVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyForPython) - } - - tablePythonVerify = tablePythonVerify[:24] -} - -func testUnmarshal(v interface{}, data []byte, h Handle) (err error) { - if testUseIoEncDec { - NewDecoder(bytes.NewBuffer(data), h).MustDecode(v) - } else { - NewDecoderBytes(data, h).MustDecode(v) - } - return -} - -func testMarshal(v interface{}, h Handle) (bs []byte, err error) { - if testUseIoEncDec { - var buf bytes.Buffer - NewEncoder(&buf, h).MustEncode(v) - bs = buf.Bytes() - return - } - NewEncoderBytes(&bs, h).MustEncode(v) - return -} - -func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) { - if bs, err = testMarshal(v, h); err != nil { - logT(t, "Error encoding %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) { - if err = 
testUnmarshal(v, data, h); err != nil { - logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -// doTestCodecTableOne allows us test for different variations based on arguments passed. -func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, - vs []interface{}, vsVerify []interface{}) { - //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work. - //Current setup allows us test (at least manually) the nil interface or typed interface. - logT(t, "================ TestNil: %v ================\n", testNil) - for i, v0 := range vs { - logT(t, "..............................................") - logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0) - b0, err := testMarshalErr(v0, h, t, "v0") - if err != nil { - continue - } - if h.isBinary() { - logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0) - } else { - logT(t, " Encoded string: len: %v, %v\n", len(string(b0)), string(b0)) - // println("########### encoded string: " + string(b0)) - } - var v1 interface{} - - if testNil { - err = testUnmarshal(&v1, b0, h) - } else { - if v0 != nil { - v0rt := reflect.TypeOf(v0) // ptr - rv1 := reflect.New(v0rt) - err = testUnmarshal(rv1.Interface(), b0, h) - v1 = rv1.Elem().Interface() - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - } - } - - logT(t, " v1 returned: %T, %#v", v1, v1) - // if v1 != nil { - // logT(t, " v1 returned: %T, %#v", v1, v1) - // //we always indirect, because ptr to typed value may be passed (if not testNil) - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - // } - if err != nil { - logT(t, "-------- Error: %v. Partial return: %v", err, v1) - failT(t) - continue - } - v0check := vsVerify[i] - if v0check == skipVerifyVal { - logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1) - continue - } - - if err = deepEqual(v0check, v1); err == nil { - logT(t, "++++++++ Before and After marshal matched\n") - } else { - // logT(t, "-------- Before and After marshal do not match: Error: %v"+ - // " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1) - logT(t, "-------- Before and After marshal do not match: Error: %v", err) - logT(t, " ....... GOLDEN: (%T) %#v", v0check, v0check) - logT(t, " ....... DECODED: (%T) %#v", v1, v1) - failT(t) - } - } -} - -func testCodecTableOne(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - // func TestMsgpackAllExperimental(t *testing.T) { - // dopts := testDecOpts(nil, nil, false, true, true), - - idxTime, numPrim, numMap := 19, 23, 4 - //println("#################") - switch v := h.(type) { - case *MsgpackHandle: - var oldWriteExt, oldRawToString bool - oldWriteExt, v.WriteExt = v.WriteExt, true - oldRawToString, v.RawToString = v.RawToString, true - doTestCodecTableOne(t, false, h, table, tableVerify) - v.WriteExt, v.RawToString = oldWriteExt, oldRawToString - case *JsonHandle: - //skip []interface{} containing time.Time, as it encodes as a number, but cannot decode back to time.Time. - //As there is no real support for extension tags in json, this must be skipped. 
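The JsonHandle case above skips the slice containing time.Time because JSON has no extension-tag system: the time goes out as a plain scalar and cannot come back as a time.Time through interface{}. The same failure mode, illustrated with the standard encoding/json for brevity (which emits a string where this codec's JsonHandle emits a number, but loses the type just the same):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	in := []interface{}{time.Unix(0, 270).UTC()}
	b, _ := json.Marshal(in) // error ignored: marshaling a time cannot fail here
	var out []interface{}
	_ = json.Unmarshal(b, &out)
	fmt.Printf("%T -> %T (%v)\n", in[0], out[0], out[0])
	// time.Time -> string (1970-01-01T00:00:00.00000027Z)
}
```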
- doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) - doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) - default: - doTestCodecTableOne(t, false, h, table, tableVerify) - } - // func TestMsgpackAll(t *testing.T) { - - // //skip []interface{} containing time.Time - // doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) - // doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) - // func TestMsgpackNilStringMap(t *testing.T) { - var oldMapType reflect.Type - v := h.getBasicHandle() - - oldMapType, v.MapType = v.MapType, testMapStrIntfTyp - - //skip time.Time, []interface{} containing time.Time, last map, and newStruc - doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) - doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) - - v.MapType = oldMapType - - // func TestMsgpackNilIntf(t *testing.T) { - - //do newTestStruc and last element of map - doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) - //TODO? What is this one? - //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) -} - -func testCodecMiscOne(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - b, err := testMarshalErr(32, h, t, "32") - // Cannot do this nil one, because faster type assertion decoding will panic - // var i *int32 - // if err = testUnmarshal(b, i, nil); err == nil { - // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") - // t.FailNow() - // } - var i2 int32 = 0 - err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") - if i2 != int32(32) { - logT(t, "------- didn't unmarshal to 32: Received: %d", i2) - t.FailNow() - } - - // func TestMsgpackDecodePtr(t *testing.T) { - ts := newTestStruc(0, false, !testSkipIntf, false) - b, err = testMarshalErr(ts, h, t, "pointer-to-struct") - if len(b) < 40 { - logT(t, "------- Size must be > 40. Size: %d", len(b)) - t.FailNow() - } - if h.isBinary() { - logT(t, "------- b: %v", b) - } else { - logT(t, "------- b: %s", b) - } - ts2 := new(TestStruc) - err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") - if ts2.I64 != math.MaxInt64*2/3 { - logT(t, "------- Unmarshal wrong. Expect I64 = 64. Got: %v", ts2.I64) - t.FailNow() - } - - // func TestMsgpackIntfDecode(t *testing.T) { - m := map[string]int{"A": 2, "B": 3} - p := []interface{}{m} - bs, err := testMarshalErr(p, h, t, "p") - - m2 := map[string]int{} - p2 := []interface{}{m2} - err = testUnmarshalErr(&p2, bs, h, t, "&p2") - - if m2["A"] != 2 || m2["B"] != 3 { - logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) - t.FailNow() - } - // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) - checkEqualT(t, p, p2, "p=p2") - checkEqualT(t, m, m2, "m=m2") - if err = deepEqual(p, p2); err == nil { - logT(t, "p and p2 match") - } else { - logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) - t.FailNow() - } - if err = deepEqual(m, m2); err == nil { - logT(t, "m and m2 match") - } else { - logT(t, "Not Equal: %v. 
m: %v, m2: %v", err, m, m2) - t.FailNow() - } - - // func TestMsgpackDecodeStructSubset(t *testing.T) { - // test that we can decode a subset of the stream - mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} - bs, err = testMarshalErr(mm, h, t, "mm") - type ttt struct { - A uint8 - C int32 - } - var t2 ttt - testUnmarshalErr(&t2, bs, h, t, "t2") - t3 := ttt{5, 333} - checkEqualT(t, t2, t3, "t2=t3") - - // println(">>>>>") - // test simple arrays, non-addressable arrays, slices - type tarr struct { - A int64 - B [3]int64 - C []byte - D [3]byte - } - var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} - // test both pointer and non-pointer (value) - for _, tarr1 := range []interface{}{tarr0, &tarr0} { - bs, err = testMarshalErr(tarr1, h, t, "tarr1") - var tarr2 tarr - testUnmarshalErr(&tarr2, bs, h, t, "tarr2") - checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") - // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) - } - - // test byte array, even if empty (msgpack only) - if h == testMsgpackH { - type ystruct struct { - Anarray []byte - } - var ya = ystruct{} - testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") - } -} - -func testCodecEmbeddedPointer(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - type Z int - type A struct { - AnInt int - } - type B struct { - *Z - *A - MoreInt int - } - var z Z = 4 - x1 := &B{&z, &A{5}, 6} - bs, err := testMarshalErr(x1, h, t, "x1") - // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) - var x2 = new(B) - err = testUnmarshalErr(x2, bs, h, t, "x2") - err = checkEqualT(t, x1, x2, "x1=x2") - _ = err -} - -func testCodecUnderlyingType(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - // Manual Test. - // Run by hand, with accompanying print statements in fast-path.go - // to ensure that the fast functions are called. - type T1 map[string]string - v := T1{"1": "1s", "2": "2s"} - var bs []byte - var err error - NewEncoderBytes(&bs, h).MustEncode(v) - if err != nil { - logT(t, "Error during encode: %v", err) - failT(t) - } - var v2 T1 - NewDecoderBytes(bs, h).MustDecode(&v2) - if err != nil { - logT(t, "Error during decode: %v", err) - failT(t) - } -} - -func testCodecChan(t *testing.T, h Handle) { - // - send a slice []*int64 (sl1) into an chan (ch1) with cap > len(s1) - // - encode ch1 as a stream array - // - decode a chan (ch2), with cap > len(s1) from the stream array - // - receive from ch2 into slice sl2 - // - compare sl1 and sl2 - // - do this for codecs: json, cbor (covers all types) - sl1 := make([]*int64, 4) - for i := range sl1 { - var j int64 = int64(i) - sl1[i] = &j - } - ch1 := make(chan *int64, 4) - for _, j := range sl1 { - ch1 <- j - } - var bs []byte - NewEncoderBytes(&bs, h).MustEncode(ch1) - // if !h.isBinary() { - // fmt.Printf("before: len(ch1): %v, bs: %s\n", len(ch1), bs) - // } - // var ch2 chan *int64 // this will block if json, etc. - ch2 := make(chan *int64, 8) - NewDecoderBytes(bs, h).MustDecode(&ch2) - // logT(t, "Len(ch2): %v", len(ch2)) - // fmt.Printf("after: len(ch2): %v, ch2: %v\n", len(ch2), ch2) - close(ch2) - var sl2 []*int64 - for j := range ch2 { - sl2 = append(sl2, j) - } - if err := deepEqual(sl1, sl2); err != nil { - logT(t, "Not Match: %v; len: %v, %v", err, len(sl1), len(sl2)) - failT(t) - } -} - -func testCodecRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, -) (port int) { - testOnce.Do(testInitAll) - if testSkipRPCTests { - return - } - // rpc needs EOF, which is sent via a panic, and so must be recovered. 
- if !recoverPanicToErr { - logT(t, "EXPECTED. set recoverPanicToErr=true, since rpc needs EOF") - t.FailNow() - } - srv := rpc.NewServer() - srv.Register(testRpcInt) - ln, err := net.Listen("tcp", "127.0.0.1:0") - // log("listener: %v", ln.Addr()) - checkErrT(t, err) - port = (ln.Addr().(*net.TCPAddr)).Port - // var opts *DecoderOptions - // opts := testDecOpts - // opts.MapType = mapStrIntfTyp - // opts.RawToString = false - serverExitChan := make(chan bool, 1) - var serverExitFlag uint64 = 0 - serverFn := func() { - for { - conn1, err1 := ln.Accept() - // if err1 != nil { - // //fmt.Printf("accept err1: %v\n", err1) - // continue - // } - if atomic.LoadUint64(&serverExitFlag) == 1 { - serverExitChan <- true - conn1.Close() - return // exit serverFn goroutine - } - if err1 == nil { - var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) - srv.ServeCodec(sc) - } - } - } - - clientFn := func(cc rpc.ClientCodec) { - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - // defer func() { println("##### client closing"); cl.Close() }() - var up, sq, mult int - var rstr string - // log("Calling client") - checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) - // log("Called TestRpcInt.Update") - checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") - checkEqualT(t, up, 5, "up=5") - checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) - checkEqualT(t, sq, 25, "sq=25") - checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) - checkEqualT(t, mult, 100, "mult=100") - checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") - checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") - } - - connFn := func() (bs net.Conn) { - // log("calling f1") - bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) - //fmt.Printf("f1. bs: %v, err2: %v\n", bs, err2) - checkErrT(t, err2) - return - } - - exitFn := func() { - atomic.StoreUint64(&serverExitFlag, 1) - bs := connFn() - <-serverExitChan - bs.Close() - // serverExitChan <- true - } - - go serverFn() - runtime.Gosched() - //time.Sleep(100 * time.Millisecond) - if exitSleepMs == 0 { - defer ln.Close() - defer exitFn() - } - if doRequest { - bs := connFn() - cc := rr.ClientCodec(bs, h) - clientFn(cc) - } - if exitSleepMs != 0 { - go func() { - defer ln.Close() - time.Sleep(exitSleepMs) - exitFn() - }() - } - return -} - -// Comprehensive testing that generates data encoded from python handle (cbor, msgpack), -// and validates that our code can read and write it out accordingly. -// We keep this unexported here, and put actual test in ext_dep_test.go. -// This way, it can be excluded by excluding file completely. -func doTestPythonGenStreams(t *testing.T, name string, h Handle) { - logT(t, "TestPythonGenStreams-%v", name) - tmpdir, err := ioutil.TempDir("", "golang-"+name+"-test") - if err != nil { - logT(t, "-------- Unable to create temp directory\n") - t.FailNow() - } - defer os.RemoveAll(tmpdir) - logT(t, "tmpdir: %v", tmpdir) - cmd := exec.Command("python", "test.py", "testdata", tmpdir) - //cmd.Stdin = strings.NewReader("some input") - //cmd.Stdout = &out - var cmdout []byte - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running test.py testdata. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - - bh := h.getBasicHandle() - - oldMapType := bh.MapType - for i, v := range tablePythonVerify { - // if v == uint64(0) && h == testMsgpackH { - // v = int64(0) - // } - bh.MapType = oldMapType - //load up the golden file based on number - //decode it - //compare to in-mem object - //encode it again - //compare to output stream - logT(t, "..............................................") - logT(t, " Testing: #%d: %T, %#v\n", i, v, v) - var bss []byte - bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+"."+name+".golden")) - if err != nil { - logT(t, "-------- Error reading golden file: %d. Err: %v", i, err) - failT(t) - continue - } - bh.MapType = testMapStrIntfTyp - - var v1 interface{} - if err = testUnmarshal(&v1, bss, h); err != nil { - logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) - failT(t) - continue - } - if v == skipVerifyVal { - continue - } - //no need to indirect, because we pass a nil ptr, so we already have the value - //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } - if err = deepEqual(v, v1); err == nil { - logT(t, "++++++++ Objects match: %T, %v", v, v) - } else { - logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) - logT(t, "-------- GOLDEN: %#v", v) - // logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) - logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) - failT(t) - } - bsb, err := testMarshal(v1, h) - if err != nil { - logT(t, "Error encoding to stream: %d: Err: %v", i, err) - failT(t) - continue - } - if err = deepEqual(bsb, bss); err == nil { - logT(t, "++++++++ Bytes match") - } else { - logT(t, "???????? Bytes do not match. %v.", err) - xs := "--------" - if reflect.ValueOf(v).Kind() == reflect.Map { - xs = " " - logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) - } else { - logT(t, "%s It's not a map. 
They should match.", xs) - failT(t) - } - logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) - logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) - } - } - bh.MapType = oldMapType -} - -// To test MsgpackSpecRpc, we test 3 scenarios: -// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) -// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) -// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) -// -// This allows us test the different calling conventions -// - Go Service requires only one argument -// - Python Service allows multiple arguments - -func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - if testSkipRPCTests { - return - } - openPort := "6789" - cmd := exec.Command("python", "test.py", "rpc-server", openPort, "2") - checkErrT(t, cmd.Start()) - time.Sleep(100 * time.Millisecond) // time for python rpc server to start - bs, err2 := net.Dial("tcp", ":"+openPort) - checkErrT(t, err2) - cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - var rstr string - checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") - var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} - checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) - checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") -} - -func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - if testSkipRPCTests { - return - } - port := testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) - //time.Sleep(1000 * time.Millisecond) - cmd := exec.Command("python", "test.py", "rpc-client-go-service", strconv.Itoa(port)) - var cmdout []byte - var err error - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running test.py rpc-client-go-service. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - checkEqualT(t, string(cmdout), - fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") -} - -func TestBincCodecsTable(t *testing.T) { - testCodecTableOne(t, testBincH) -} - -func TestBincCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testBincH) -} - -func TestBincCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testBincH) -} - -func TestSimpleCodecsTable(t *testing.T) { - testCodecTableOne(t, testSimpleH) -} - -func TestSimpleCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testSimpleH) -} - -func TestSimpleCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testSimpleH) -} - -func TestMsgpackCodecsTable(t *testing.T) { - testCodecTableOne(t, testMsgpackH) -} - -func TestMsgpackCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testMsgpackH) -} - -func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testMsgpackH) -} - -func TestCborCodecsTable(t *testing.T) { - testCodecTableOne(t, testCborH) -} - -func TestCborCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testCborH) -} - -func TestCborCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testCborH) -} - -func TestJsonCodecsTable(t *testing.T) { - testCodecTableOne(t, testJsonH) -} - -func TestJsonCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testJsonH) -} - -func TestJsonCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testJsonH) -} - -func TestJsonCodecChan(t *testing.T) { - testCodecChan(t, testJsonH) -} - -func TestCborCodecChan(t *testing.T) { - testCodecChan(t, testCborH) -} - -// ----- RPC ----- - -func TestBincRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testBincH, true, 0) -} - -func TestSimpleRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testSimpleH, true, 0) -} - -func TestMsgpackRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testMsgpackH, true, 0) -} - -func TestCborRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testCborH, true, 0) -} - -func TestJsonRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testJsonH, true, 0) -} - -func TestMsgpackRpcSpec(t *testing.T) { - testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) -} - -func TestBincUnderlyingType(t *testing.T) { - testCodecUnderlyingType(t, testBincH) -} - -// TODO: -// Add Tests for: -// - decoding empty list/map in stream into a nil slice/map -// - binary(M|Unm)arsher support for time.Time (e.g. cbor encoding) -// - text(M|Unm)arshaler support for time.Time (e.g. json encoding) -// - non fast-path scenarios e.g. map[string]uint16, []customStruct. -// Expand cbor to include indefinite length stuff for this non-fast-path types. -// This may not be necessary, since we have the manual tests (fastpathEnabled=false) to test/validate with. -// - CodecSelfer -// Ensure it is called when (en|de)coding interface{} or reflect.Value (2 different codepaths). -// - interfaces: textMarshaler, binaryMarshaler, codecSelfer -// - struct tags: -// on anonymous fields, _struct (all fields), etc -// - codecgen of struct containing channels. 
-// -// Cleanup tests: -// - The are brittle in their handling of validation and skipping diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md b/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md deleted file mode 100644 index 3ae8a05..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# codecgen tool - -Generate is given a list of *.go files to parse, and an output file (fout), -codecgen will create an output file __file.go__ which -contains `codec.Selfer` implementations for the named types found -in the files parsed. - -Using codecgen is very straightforward. - -**Download and install the tool** - -`go get -u github.com/ugorji/go/codec/codecgen` - -**Run the tool on your files** - -The command line format is: - -`codecgen [options] (-o outfile) (infile ...)` - -```sh -% codecgen -? -Usage of codecgen: - -c="github.com/ugorji/go/codec": codec path - -o="": out file - -r=".*": regex for type name to match - -rt="": tags for go run - -t="": build tag to put in file - -u=false: Use unsafe, e.g. to avoid unnecessary allocation on []byte->string - -x=false: keep temp file - -% codecgen -o values_codecgen.go values.go values2.go moretypedefs.go -``` - -Please see the [blog article](http://ugorji.net/blog/go-codecgen) -for more information on how to use the tool. - diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go deleted file mode 100644 index 892df59..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// codecgen generates codec.Selfer implementations for a set of types. -package main - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "text/template" - "time" -) - -const genFrunMainTmpl = `//+build ignore - -package main -{{ if .Types }}import "{{ .ImportPath }}"{{ end }} -func main() { - {{ $.PackageName }}.CodecGenTempWrite{{ .RandString }}() -} -` - -// const genFrunPkgTmpl = `//+build codecgen -const genFrunPkgTmpl = ` -package {{ $.PackageName }} - -import ( - {{ if not .CodecPkgFiles }}{{ .CodecPkgName }} "{{ .CodecImportPath }}"{{ end }} -{{/* - {{ if .Types }}"{{ .ImportPath }}"{{ end }} - "io" -*/}} - "os" - "reflect" - "bytes" - "go/format" -) - -{{/* This is not used anymore. Remove it. -func write(w io.Writer, s string) { - if _, err := io.WriteString(w, s); err != nil { - panic(err) - } -} -*/}} - -func CodecGenTempWrite{{ .RandString }}() { - fout, err := os.Create("{{ .OutFile }}") - if err != nil { - panic(err) - } - defer fout.Close() - var out bytes.Buffer - - var typs []reflect.Type -{{ range $index, $element := .Types }} - var t{{ $index }} {{ . }} - typs = append(typs, reflect.TypeOf(t{{ $index }})) -{{ end }} - {{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}Gen(&out, "{{ .BuildTag }}", "{{ .PackageName }}", {{ .UseUnsafe }}, typs...) - bout, err := format.Source(out.Bytes()) - if err != nil { - fout.Write(out.Bytes()) - panic(err) - } - fout.Write(bout) -} - -` - -// Generate is given a list of *.go files to parse, and an output file (fout). -// -// It finds all types T in the files, and it creates 2 tmp files (frun). 
-// - main package file passed to 'go run' -// - package level file which calls *genRunner.Selfer to write Selfer impls for each T. -// We use a package level file so that it can reference unexported types in the package being worked on. -// Tool then executes: "go run __frun__" which creates fout. -// fout contains Codec(En|De)codeSelf implementations for every type T. -// -func Generate(outfile, buildTag, codecPkgPath string, useUnsafe bool, goRunTag string, - regexName *regexp.Regexp, deleteTempFile bool, infiles ...string) (err error) { - // For each file, grab AST, find each type, and write a call to it. - if len(infiles) == 0 { - return - } - if outfile == "" || codecPkgPath == "" { - err = errors.New("outfile and codec package path cannot be blank") - return - } - // We have to parse dir for package, before opening the temp file for writing (else ImportDir fails). - // Also, ImportDir(...) must take an absolute path. - lastdir := filepath.Dir(outfile) - absdir, err := filepath.Abs(lastdir) - if err != nil { - return - } - pkg, err := build.Default.ImportDir(absdir, build.AllowBinary) - if err != nil { - return - } - type tmplT struct { - CodecPkgName string - CodecImportPath string - ImportPath string - OutFile string - PackageName string - RandString string - BuildTag string - Types []string - CodecPkgFiles bool - UseUnsafe bool - } - tv := tmplT{ - CodecPkgName: "codec1978", - OutFile: outfile, - CodecImportPath: codecPkgPath, - BuildTag: buildTag, - UseUnsafe: useUnsafe, - RandString: strconv.FormatInt(time.Now().UnixNano(), 10), - } - tv.ImportPath = pkg.ImportPath - if tv.ImportPath == tv.CodecImportPath { - tv.CodecPkgFiles = true - tv.CodecPkgName = "codec" - } - astfiles := make([]*ast.File, len(infiles)) - for i, infile := range infiles { - if filepath.Dir(infile) != lastdir { - err = errors.New("in files must all be in same directory as outfile") - return - } - fset := token.NewFileSet() - astfiles[i], err = parser.ParseFile(fset, infile, nil, 0) - if err != nil { - return - } - if i == 0 { - tv.PackageName = astfiles[i].Name.Name - if tv.PackageName == "main" { - // codecgen cannot be run on types in the 'main' package. - // A temporary 'main' package must be created, and should reference the fully built - // package containing the types. - // Also, the temporary main package will conflict with the main package which already has a main method. - err = errors.New("codecgen cannot be run on types in the 'main' package") - return - } - } - } - - for _, f := range astfiles { - for _, d := range f.Decls { - if gd, ok := d.(*ast.GenDecl); ok { - for _, dd := range gd.Specs { - if td, ok := dd.(*ast.TypeSpec); ok { - // if len(td.Name.Name) == 0 || td.Name.Name[0] > 'Z' || td.Name.Name[0] < 'A' { - if len(td.Name.Name) == 0 { - continue - } - - // only generate for: - // struct: StructType - // primitives (numbers, bool, string): Ident - // map: MapType - // slice, array: ArrayType - // chan: ChanType - // do not generate: - // FuncType, InterfaceType, StarExpr (ptr), etc - switch td.Type.(type) { - case *ast.StructType, *ast.Ident, *ast.MapType, *ast.ArrayType, *ast.ChanType: - if regexName.FindStringIndex(td.Name.Name) != nil { - tv.Types = append(tv.Types, td.Name.Name) - } - } - } - } - } - } - } - - if len(tv.Types) == 0 { - return - } - - // we cannot use ioutil.TempFile, because we cannot guarantee the file suffix (.go). - // Also, we cannot create file in temp directory, - // because go run will not work (as it needs to see the types here). 
- // Consequently, create the temp file in the current directory, and remove when done. - - // frun, err = ioutil.TempFile("", "codecgen-") - // frunName := filepath.Join(os.TempDir(), "codecgen-"+strconv.FormatInt(time.Now().UnixNano(), 10)+".go") - - frunMainName := "codecgen-main-" + tv.RandString + ".generated.go" - frunPkgName := "codecgen-pkg-" + tv.RandString + ".generated.go" - if deleteTempFile { - defer os.Remove(frunMainName) - defer os.Remove(frunPkgName) - } - // var frunMain, frunPkg *os.File - if _, err = gen1(frunMainName, genFrunMainTmpl, &tv); err != nil { - return - } - if _, err = gen1(frunPkgName, genFrunPkgTmpl, &tv); err != nil { - return - } - - // remove outfile, so "go run ..." will not think that types in outfile already exist. - os.Remove(outfile) - - // execute go run frun - cmd := exec.Command("go", "run", "-tags="+goRunTag, frunMainName) //, frunPkg.Name()) - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - if err = cmd.Run(); err != nil { - err = fmt.Errorf("error running 'go run %s': %v, console: %s", - frunMainName, err, buf.Bytes()) - return - } - os.Stdout.Write(buf.Bytes()) - return -} - -func gen1(frunName, tmplStr string, tv interface{}) (frun *os.File, err error) { - os.Remove(frunName) - if frun, err = os.Create(frunName); err != nil { - return - } - defer frun.Close() - - t := template.New("") - if t, err = t.Parse(tmplStr); err != nil { - return - } - bw := bufio.NewWriter(frun) - if err = t.Execute(bw, tv); err != nil { - return - } - if err = bw.Flush(); err != nil { - return - } - return -} - -func main() { - o := flag.String("o", "", "out file") - c := flag.String("c", genCodecPath, "codec path") - t := flag.String("t", "", "build tag to put in file") - r := flag.String("r", ".*", "regex for type name to match") - rt := flag.String("rt", "", "tags for go run") - x := flag.Bool("x", false, "keep temp file") - u := flag.Bool("u", false, "Use unsafe, e.g. 
to avoid unnecessary allocation on []byte->string") - - flag.Parse() - if err := Generate(*o, *t, *c, *u, *rt, - regexp.MustCompile(*r), !*x, flag.Args()...); err != nil { - fmt.Fprintf(os.Stderr, "codecgen error: %v\n", err) - os.Exit(1) - } -} diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go deleted file mode 100644 index e120a4e..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go +++ /dev/null @@ -1,3 +0,0 @@ -package main - -const genCodecPath = "github.com/ugorji/go/codec" diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go deleted file mode 100644 index 2fdfd16..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build x,codecgen - -package codec - -import ( - "fmt" - "testing" -) - -func TestCodecgenJson1(t *testing.T) { - const callCodecgenDirect bool = true - v := newTestStruc(2, false, !testSkipIntf, false) - var bs []byte - e := NewEncoderBytes(&bs, testJsonH) - if callCodecgenDirect { - v.CodecEncodeSelf(e) - e.w.atEndOfEncode() - } else { - e.MustEncode(v) - } - fmt.Printf("%s\n", bs) -} diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go deleted file mode 100644 index 685c576..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// All non-std package dependencies related to testing live in this file, -// so porting to different environment is easy (just update functions). -// -// This file sets up the variables used, including testInitFns. -// Each file should add initialization that should be performed -// after flags are parsed. -// -// init is a multi-step process: -// - setup vars (handled by init functions in each file) -// - parse flags -// - setup derived vars (handled by pre-init registered functions - registered in init function) -// - post init (handled by post-init registered functions - registered in init function) -// This way, no one has to manage carefully control the initialization -// using file names, etc. -// -// Tests which require external dependencies need the -tag=x parameter. -// They should be run as: -// go test -tags=x -run=. -// Benchmarks should also take this parameter, to include the sereal, xdr, etc. -// To run against codecgen, etc, make sure you pass extra parameters. -// Example usage: -// go test "-tags=x codecgen unsafe" -bench=. -// -// To fully test everything: -// go test -tags=x -benchtime=100ms -tv -bg -bi -brw -bu -v -run=. -bench=. 
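To make the `-tags=x` convention above concrete, here is a minimal sketch of a tag-gated test file; the file name and test body are illustrative only, not part of the codec package:

```go
//+build x

package codec

import "testing"

// Compiled and run only when the build tag is supplied, e.g.:
//   go test -tags=x -run=.
func TestRequiresExternalDeps(t *testing.T) {
	t.Log("external-dependency tests enabled via -tags=x")
}
```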
- -import ( - "errors" - "flag" - "fmt" - "reflect" - "sync" - "testing" -) - -const ( - testLogToT = true - failNowOnFail = true -) - -var ( - testNoopH = NoopHandle(8) - testMsgpackH = &MsgpackHandle{} - testBincH = &BincHandle{} - testBincHNoSym = &BincHandle{} - testBincHSym = &BincHandle{} - testSimpleH = &SimpleHandle{} - testCborH = &CborHandle{} - testJsonH = &JsonHandle{} - - testPreInitFns []func() - testPostInitFns []func() - - testOnce sync.Once -) - -func init() { - testBincHSym.AsSymbols = AsSymbolAll - testBincHNoSym.AsSymbols = AsSymbolNone -} - -func testInitAll() { - flag.Parse() - for _, f := range testPreInitFns { - f() - } - for _, f := range testPostInitFns { - f() - } -} - -func logT(x interface{}, format string, args ...interface{}) { - if t, ok := x.(*testing.T); ok && t != nil && testLogToT { - if testVerbose { - t.Logf(format, args...) - } - } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { - b.Logf(format, args...) - } else { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) - } -} - -func approxDataSize(rv reflect.Value) (sum int) { - switch rk := rv.Kind(); rk { - case reflect.Invalid: - case reflect.Ptr, reflect.Interface: - sum += int(rv.Type().Size()) - sum += approxDataSize(rv.Elem()) - case reflect.Slice: - sum += int(rv.Type().Size()) - for j := 0; j < rv.Len(); j++ { - sum += approxDataSize(rv.Index(j)) - } - case reflect.String: - sum += int(rv.Type().Size()) - sum += rv.Len() - case reflect.Map: - sum += int(rv.Type().Size()) - for _, mk := range rv.MapKeys() { - sum += approxDataSize(mk) - sum += approxDataSize(rv.MapIndex(mk)) - } - case reflect.Struct: - //struct size already includes the full data size. - //sum += int(rv.Type().Size()) - for j := 0; j < rv.NumField(); j++ { - sum += approxDataSize(rv.Field(j)) - } - default: - //pure value types - sum += int(rv.Type().Size()) - } - return -} - -// ----- functions below are used only by tests (not benchmarks) - -func checkErrT(t *testing.T, err error) { - if err != nil { - logT(t, err.Error()) - failT(t) - } -} - -func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { - if err = deepEqual(v1, v2); err != nil { - logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) - failT(t) - } - return -} - -func failT(t *testing.T) { - if failNowOnFail { - t.FailNow() - } else { - t.Fail() - } -} - -func deepEqual(v1, v2 interface{}) (err error) { - if !reflect.DeepEqual(v1, v2) { - err = errors.New("Not Match") - } - return -} diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go deleted file mode 100644 index be0374c..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build x - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// These tests are used to verify msgpack and cbor implementations against their python libraries. -// If you have the library installed, you can enable the tests back by removing the //+build ignore. 
- -import ( - "testing" -) - -func TestMsgpackPythonGenStreams(t *testing.T) { - doTestPythonGenStreams(t, "msgpack", testMsgpackH) -} - -func TestCborPythonGenStreams(t *testing.T) { - doTestPythonGenStreams(t, "cbor", testCborH) -} - -func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - doTestMsgpackRpcSpecGoClientToPythonSvc(t) -} - -func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - doTestMsgpackRpcSpecPythonClientToGoSvc(t) -} diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go deleted file mode 100644 index 4ec28e1..0000000 --- a/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go +++ /dev/null @@ -1,203 +0,0 @@ -// // +build testing - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// This file contains values used by tests and benchmarks. -// JSON/BSON do not like maps with keys that are not strings, -// so we only use maps with string keys here. - -import ( - "math" - "time" -) - -var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC() - -type AnonInTestStruc struct { - AS string - AI64 int64 - AI16 int16 - AUi64 uint64 - ASslice []string - AI64slice []int64 - AF64slice []float64 - // AMI32U32 map[int32]uint32 - // AMU32F64 map[uint32]float64 // json/bson do not like it - AMSU16 map[string]uint16 -} - -type AnonInTestStrucIntf struct { - Islice []interface{} - Ms map[string]interface{} - Nintf interface{} //don't set this, so we can test for nil - T time.Time -} - -type TestStruc struct { - _struct struct{} `codec:",omitempty"` //set omitempty for every field - - S string - I64 int64 - I16 int16 - Ui64 uint64 - Ui8 uint8 - B bool - By uint8 // byte: msgp doesn't like byte - - Sslice []string - I64slice []int64 - I16slice []int16 - Ui64slice []uint64 - Ui8slice []uint8 - Bslice []bool - Byslice []byte - - Iptrslice []*int64 - - // TODO: test these separately, specifically for reflection and codecgen. - // Unfortunately, ffjson doesn't support these. Its compilation even fails. - // Ui64array [4]uint64 - // Ui64slicearray [][4]uint64 - - AnonInTestStruc - - //M map[interface{}]interface{} `json:"-",bson:"-"` - Msi64 map[string]int64 - - // make this a ptr, so that it could be set or not. - // for comparison (e.g. with msgp), give it a struct tag (so it is not inlined), - // make this one omitempty (so it is included if nil). 
- *AnonInTestStrucIntf `codec:",omitempty"` - - Nmap map[string]bool //don't set this, so we can test for nil - Nslice []byte //don't set this, so we can test for nil - Nint64 *int64 //don't set this, so we can test for nil - Mtsptr map[string]*TestStruc - Mts map[string]TestStruc - Its []*TestStruc - Nteststruc *TestStruc -} - -// small struct for testing that codecgen works for unexported types -type tLowerFirstLetter struct { - I int - u uint64 - S string - b []byte -} - -func newTestStruc(depth int, bench bool, useInterface, useStringKeyOnly bool) (ts *TestStruc) { - var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 - - ts = &TestStruc{ - S: "some string", - I64: math.MaxInt64 * 2 / 3, // 64, - I16: 1616, - Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it - Ui8: 160, - B: true, - By: 5, - - Sslice: []string{"one", "two", "three"}, - I64slice: []int64{1111, 2222, 3333}, - I16slice: []int16{44, 55, 66}, - Ui64slice: []uint64{12121212, 34343434, 56565656}, - Ui8slice: []uint8{210, 211, 212}, - Bslice: []bool{true, false, true, false}, - Byslice: []byte{13, 14, 15}, - - Msi64: map[string]int64{ - "one": 1, - "two": 2, - }, - AnonInTestStruc: AnonInTestStruc{ - // There's more leeway in altering this. - AS: "A-String", - AI64: -64646464, - AI16: 1616, - AUi64: 64646464, - // (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E". - // single reverse solidus character may be represented in json as "\u005C". - // include these in ASslice below. - ASslice: []string{"Aone", "Atwo", "Athree", - "Afour.reverse_solidus.\u005c", "Afive.Gclef.\U0001d11E"}, - AI64slice: []int64{1, -22, 333, -4444, 55555, -666666}, - AMSU16: map[string]uint16{"1": 1, "22": 2, "333": 3, "4444": 4}, - AF64slice: []float64{11.11e-11, 22.22E+22, 33.33E-33, 44.44e+44, 555.55E-6, 666.66E6}, - }, - } - if useInterface { - ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{ - Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, - Ms: map[string]interface{}{ - "true": "true", - "int64(9)": false, - }, - T: testStrucTime, - } - } - - //For benchmarks, some things will not work. 
- if !bench { - //json and bson require string keys in maps - //ts.M = map[interface{}]interface{}{ - // true: "true", - // int8(9): false, - //} - //gob cannot encode nil in element in array (encodeArray: nil element) - ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} - // ts.Iptrslice = nil - } - if !useStringKeyOnly { - // ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf - } - if depth > 0 { - depth-- - if ts.Mtsptr == nil { - ts.Mtsptr = make(map[string]*TestStruc) - } - if ts.Mts == nil { - ts.Mts = make(map[string]TestStruc) - } - ts.Mtsptr["0"] = newTestStruc(depth, bench, useInterface, useStringKeyOnly) - ts.Mts["0"] = *(ts.Mtsptr["0"]) - ts.Its = append(ts.Its, ts.Mtsptr["0"]) - } - return -} - -// Some other types - -type Sstring string -type Bbool bool -type Sstructsmall struct { - A int -} - -type Sstructbig struct { - A int - B bool - c string - // Sval Sstruct - Ssmallptr *Sstructsmall - Ssmall *Sstructsmall - Sptr *Sstructbig -} - -type SstructbigMapBySlice struct { - _struct struct{} `codec:",toarray"` - A int - B bool - c string - // Sval Sstruct - Ssmallptr *Sstructsmall - Ssmall *Sstructsmall - Sptr *Sstructbig -} - -type Sinterface interface { - Noop() -} diff --git a/Makefile b/Makefile index 586617d..89694ee 100644 --- a/Makefile +++ b/Makefile @@ -11,36 +11,44 @@ export LD_LIBRARY_PATH export DYLD_LIBRARY_PATH export GO_BUILD_TAGS +GO=GO15VENDOREXPERIMENT="1" go + +PKGS=$(shell $(GO) list ./... | grep -v "cmd") + all: build build: - $(GO) install -tags '$(GO_BUILD_TAGS)' ./... + $(GO) build -o bin/ledis-server -tags '$(GO_BUILD_TAGS)' cmd/ledis-server/* + $(GO) build -o bin/ledis-cli -tags '$(GO_BUILD_TAGS)' cmd/ledis-cli/* -build_lmdb: - $(GO) install -tags '$(GO_BUILD_TAGS) lmdb' ./... +build_all: build + $(GO) build -o bin/ledis-benchmark -tags '$(GO_BUILD_TAGS)' cmd/ledis-benchmark/* + $(GO) build -o bin/ledis-dump -tags '$(GO_BUILD_TAGS)' cmd/ledis-dump/* + $(GO) build -o bin/ledis-load -tags '$(GO_BUILD_TAGS)' cmd/ledis-load/* + $(GO) build -o bin/ledis-repair -tags '$(GO_BUILD_TAGS)' cmd/ledis-repair/* test: - $(GO) test --race -tags '$(GO_BUILD_TAGS)' ./... - -test_lmdb: - $(GO) test --race -tags '$(GO_BUILD_TAGS) lmdb' ./... - -test_ledis: - $(GO) test --race -tags '$(GO_BUILD_TAGS)' ./ledis - -test_server: - $(GO) test --race -tags '$(GO_BUILD_TAGS)' ./server - -test_store: - $(GO) test --race -tags '$(GO_BUILD_TAGS)' ./store - -test_rpl: - $(GO) test --race -tags '$(GO_BUILD_TAGS)' ./rpl + # use vendor for test + @rm -rf vendor && ln -s cmd/vendor vendor + @$(GO) test --race -tags '$(GO_BUILD_TAGS)' $(PKGS) + @rm -rf vendor clean: - rm -rf Godeps/_workspace/pkg/ $(GO) clean -i ./... fmt: - gofmt -w=true cmd config ledis rpl server store vendor - goimports -w=true cmd config ledis rpl server store vendor \ No newline at end of file + gofmt -w -s . 2>&1 | grep -vE 'vendor' | awk '{print} END{if(NR>0) {exit 1}}' + +deps: + # see https://github.com/coreos/etcd/blob/master/scripts/updatedep.sh + rm -rf Godeps vendor cmd/vendor + mkdir -p cmd/vendor + ln -s cmd/vendor vendor + godep save ./... + rm -rf cmd/Godeps + rm vendor + mv Godeps cmd/ + +travis: + @rm -rf vendor && ln -s cmd/vendor vendor + @$(GO) test --race -tags '$(GO_BUILD_TAGS)' $(PKGS) \ No newline at end of file diff --git a/README.md b/README.md index 51cda5a..1b920a4 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ LedisDB now supports multiple different databases as backends. + Rich data structure: KV, List, Hash, ZSet, Set. 
+ Data storage is not limited by RAM. -+ Various backends supported: LevelDB, goleveldb, LMDB, RocksDB, BoltDB, RAM. ++ Various backends supported: LevelDB, goleveldb, RocksDB, RAM. + Supports Lua scripting. + Supports expiration and TTL. + Can be managed via redis-cli. @@ -43,13 +43,6 @@ Create a workspace and checkout ledisdb source make make test -## Godep support - -LedisDB now prefers using [godep](https://github.com/tools/godep) to build, godep can manage the go dependences easily. - -If you don't want to use godep, you can first run `sh bootstrap.sh` to download the depencenecs and then `make`, -but I will not guarantee the dependence compatibility. - ## LevelDB support + Install leveldb and snappy. @@ -82,7 +75,7 @@ If the RocksDB API changes, LedisDB may not build successfully. LedisDB currentl ## Choose store database -LedisDB now supports goleveldb, lmdb, leveldb, rocksdb, boltdb, and RAM. It will use goleveldb by default. +LedisDB now supports goleveldb, leveldb, rocksdb, and RAM. It will use goleveldb by default. Choosing a store database to use is very simple. @@ -113,10 +106,10 @@ If you don't use a configuration, LedisDB will use the default for you. //set run environment if not source dev.sh - ledis-server -config=/etc/ledis.conf + ./bin/ledis-server -config=/etc/ledis.conf //another shell - ledis-cli -p 6380 + ./bin/ledis-cli -p 6380 ledis 127.0.0.1:6380> set a 1 OK @@ -187,9 +180,8 @@ See [Clients](https://github.com/siddontang/ledisdb/wiki/Clients) to find or con ## Requirement -+ Go version >= 1.3 ++ Go version >= 1.5 ## Feedback + Gmail: siddontang@gmail.com -+ Skype: live:siddontang_1 diff --git a/bootstrap.sh b/bootstrap.sh deleted file mode 100755 index 4adb8fd..0000000 --- a/bootstrap.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -. ./dev.sh - -# Test godep install -godep path > /dev/null 2>&1 -if [ "$?" = 0 ]; then - exit 0 -fi - -echo "Please use [godep](https://github.com/tools/godep) to build LedisDB, :-)" - -go get -d ./... diff --git a/cmd/Godeps/Godeps.json b/cmd/Godeps/Godeps.json new file mode 100644 index 0000000..cc427b4 --- /dev/null +++ b/cmd/Godeps/Godeps.json @@ -0,0 +1,135 @@ +{ + "ImportPath": "github.com/siddontang/ledisdb", + "GoVersion": "go1.6", + "GodepVersion": "v62", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/BurntSushi/toml", + "Comment": "v0.1.0-21-g056c9bc", + "Rev": "056c9bc7be7190eaa7715723883caffa5f8fa3e4" + }, + { + "ImportPath": "github.com/cupcake/rdb", + "Rev": "3454dcabd33cb8ea8261ffd6a45f4d836eb504cc" + }, + { + "ImportPath": "github.com/cupcake/rdb/crc64", + "Rev": "3454dcabd33cb8ea8261ffd6a45f4d836eb504cc" + }, + { + "ImportPath": "github.com/cupcake/rdb/nopdecoder", + "Rev": "3454dcabd33cb8ea8261ffd6a45f4d836eb504cc" + }, + { + "ImportPath": "github.com/edsrzf/mmap-go", + "Rev": "f9d5617258fa999241e93b43f2ff89e4507cc3f2" + }, + { + "ImportPath": "github.com/golang/snappy", + "Rev": "723cc1e459b8eea2dea4583200fd60757d40097a" + }, + { + "ImportPath": "github.com/peterh/liner", + "Rev": "d5e5aeeb67ca5aeeddeb0b6c3af05421ff63a0b6" + }, + { + "ImportPath": "github.com/siddontang/go/bson", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/filelock", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/hack", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/ioutil2", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/log", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/num", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/snappy", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/go/sync2", + "Rev": "354e14e6c093c661abb29fd28403b3c19cff5514" + }, + { + "ImportPath": "github.com/siddontang/golua", + "Rev": "f73e23294ebf29412e1e96bb6163be152700d250" + }, + { + "ImportPath": "github.com/siddontang/goredis", + "Rev": "760763f78400635ed7b9b115511b8ed06035e908" + }, + { + "ImportPath": "github.com/siddontang/rdb", + "Rev": "fc89ed2e418d27e3ea76e708e54276d2b44ae9cf" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/cache", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/errors", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/filter", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/journal", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/opt", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/storage", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/table", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb/util", + "Rev": "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + }, + { + "ImportPath": "github.com/ugorji/go/codec", + "Rev": 
"5abd4e96a45c386928ed2ca2a7ef63e2533e18ec" + } + ] +} diff --git a/Godeps/Readme b/cmd/Godeps/Readme similarity index 100% rename from Godeps/Readme rename to cmd/Godeps/Readme diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore b/cmd/vendor/github.com/BurntSushi/toml/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore rename to cmd/vendor/github.com/BurntSushi/toml/.gitignore diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml b/cmd/vendor/github.com/BurntSushi/toml/.travis.yml similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml rename to cmd/vendor/github.com/BurntSushi/toml/.travis.yml diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE b/cmd/vendor/github.com/BurntSushi/toml/COMPATIBLE similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE rename to cmd/vendor/github.com/BurntSushi/toml/COMPATIBLE diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING b/cmd/vendor/github.com/BurntSushi/toml/COPYING similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING rename to cmd/vendor/github.com/BurntSushi/toml/COPYING diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile b/cmd/vendor/github.com/BurntSushi/toml/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile rename to cmd/vendor/github.com/BurntSushi/toml/Makefile diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md b/cmd/vendor/github.com/BurntSushi/toml/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/README.md rename to cmd/vendor/github.com/BurntSushi/toml/README.md diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go b/cmd/vendor/github.com/BurntSushi/toml/decode.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go rename to cmd/vendor/github.com/BurntSushi/toml/decode.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go b/cmd/vendor/github.com/BurntSushi/toml/decode_meta.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go rename to cmd/vendor/github.com/BurntSushi/toml/decode_meta.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go b/cmd/vendor/github.com/BurntSushi/toml/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go rename to cmd/vendor/github.com/BurntSushi/toml/doc.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go b/cmd/vendor/github.com/BurntSushi/toml/encode.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go rename to cmd/vendor/github.com/BurntSushi/toml/encode.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go b/cmd/vendor/github.com/BurntSushi/toml/encoding_types.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go rename to cmd/vendor/github.com/BurntSushi/toml/encoding_types.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/cmd/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go rename to cmd/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go diff 
--git a/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go b/cmd/vendor/github.com/BurntSushi/toml/lex.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go rename to cmd/vendor/github.com/BurntSushi/toml/lex.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go b/cmd/vendor/github.com/BurntSushi/toml/parse.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go rename to cmd/vendor/github.com/BurntSushi/toml/parse.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim b/cmd/vendor/github.com/BurntSushi/toml/session.vim similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim rename to cmd/vendor/github.com/BurntSushi/toml/session.vim diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go b/cmd/vendor/github.com/BurntSushi/toml/type_check.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go rename to cmd/vendor/github.com/BurntSushi/toml/type_check.go diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go b/cmd/vendor/github.com/BurntSushi/toml/type_fields.go similarity index 100% rename from Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go rename to cmd/vendor/github.com/BurntSushi/toml/type_fields.go diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/.gitignore b/cmd/vendor/github.com/cupcake/rdb/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/.gitignore rename to cmd/vendor/github.com/cupcake/rdb/.gitignore diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/.travis.yml b/cmd/vendor/github.com/cupcake/rdb/.travis.yml similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/.travis.yml rename to cmd/vendor/github.com/cupcake/rdb/.travis.yml diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/LICENCE b/cmd/vendor/github.com/cupcake/rdb/LICENCE similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/LICENCE rename to cmd/vendor/github.com/cupcake/rdb/LICENCE diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/README.md b/cmd/vendor/github.com/cupcake/rdb/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/README.md rename to cmd/vendor/github.com/cupcake/rdb/README.md diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/crc64/crc64.go b/cmd/vendor/github.com/cupcake/rdb/crc64/crc64.go similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/crc64/crc64.go rename to cmd/vendor/github.com/cupcake/rdb/crc64/crc64.go diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/decoder.go b/cmd/vendor/github.com/cupcake/rdb/decoder.go similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/decoder.go rename to cmd/vendor/github.com/cupcake/rdb/decoder.go diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/encoder.go b/cmd/vendor/github.com/cupcake/rdb/encoder.go similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/encoder.go rename to cmd/vendor/github.com/cupcake/rdb/encoder.go diff --git a/Godeps/_workspace/src/github.com/cupcake/rdb/nopdecoder/nop_decoder.go b/cmd/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/nopdecoder/nop_decoder.go rename to cmd/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go diff --git 
a/Godeps/_workspace/src/github.com/cupcake/rdb/slice_buffer.go b/cmd/vendor/github.com/cupcake/rdb/slice_buffer.go similarity index 100% rename from Godeps/_workspace/src/github.com/cupcake/rdb/slice_buffer.go rename to cmd/vendor/github.com/cupcake/rdb/slice_buffer.go diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/.gitignore b/cmd/vendor/github.com/edsrzf/mmap-go/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/edsrzf/mmap-go/.gitignore rename to cmd/vendor/github.com/edsrzf/mmap-go/.gitignore diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/LICENSE b/cmd/vendor/github.com/edsrzf/mmap-go/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/edsrzf/mmap-go/LICENSE rename to cmd/vendor/github.com/edsrzf/mmap-go/LICENSE diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/README.md b/cmd/vendor/github.com/edsrzf/mmap-go/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/edsrzf/mmap-go/README.md rename to cmd/vendor/github.com/edsrzf/mmap-go/README.md diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap.go b/cmd/vendor/github.com/edsrzf/mmap-go/mmap.go similarity index 100% rename from Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap.go rename to cmd/vendor/github.com/edsrzf/mmap-go/mmap.go diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_unix.go b/cmd/vendor/github.com/edsrzf/mmap-go/mmap_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_unix.go rename to cmd/vendor/github.com/edsrzf/mmap-go/mmap_unix.go diff --git a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_windows.go b/cmd/vendor/github.com/edsrzf/mmap-go/mmap_windows.go similarity index 78% rename from Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_windows.go rename to cmd/vendor/github.com/edsrzf/mmap-go/mmap_windows.go index abf70f8..1e0bd2e 100644 --- a/Godeps/_workspace/src/github.com/edsrzf/mmap-go/mmap_windows.go +++ b/cmd/vendor/github.com/edsrzf/mmap-go/mmap_windows.go @@ -38,13 +38,23 @@ func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) { dwDesiredAccess |= syscall.FILE_MAP_EXECUTE } + // The maximum size is the area of the file, starting from 0, + // that we wish to allow to be mappable. It is the sum of + // the length the user requested, plus the offset where that length + // is starting from. This does not map the data into memory. + maxSizeHigh := uint32((off + int64(len)) >> 32) + maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF) // TODO: Do we need to set some security attributes? It might help portability. - h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, 0, uint32(len), nil) + h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil) if h == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } - addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, uint32(off>>32), uint32(off&0xFFFFFFFF), uintptr(len)) + // Actually map a view of the data into memory. The view's size + // is the length the user requested. 
+ fileOffsetHigh := uint32(off >> 32) + fileOffsetLow := uint32(off & 0xFFFFFFFF) + addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len)) if addr == 0 { return nil, os.NewSyscallError("MapViewOfFile", errno) } diff --git a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS b/cmd/vendor/github.com/golang/snappy/AUTHORS similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/AUTHORS rename to cmd/vendor/github.com/golang/snappy/AUTHORS diff --git a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS b/cmd/vendor/github.com/golang/snappy/CONTRIBUTORS similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS rename to cmd/vendor/github.com/golang/snappy/CONTRIBUTORS diff --git a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE b/cmd/vendor/github.com/golang/snappy/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/LICENSE rename to cmd/vendor/github.com/golang/snappy/LICENSE diff --git a/Godeps/_workspace/src/github.com/golang/snappy/README b/cmd/vendor/github.com/golang/snappy/README similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/README rename to cmd/vendor/github.com/golang/snappy/README diff --git a/Godeps/_workspace/src/github.com/golang/snappy/decode.go b/cmd/vendor/github.com/golang/snappy/decode.go similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/decode.go rename to cmd/vendor/github.com/golang/snappy/decode.go diff --git a/Godeps/_workspace/src/github.com/golang/snappy/encode.go b/cmd/vendor/github.com/golang/snappy/encode.go similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/encode.go rename to cmd/vendor/github.com/golang/snappy/encode.go diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go b/cmd/vendor/github.com/golang/snappy/snappy.go similarity index 100% rename from Godeps/_workspace/src/github.com/golang/snappy/snappy.go rename to cmd/vendor/github.com/golang/snappy/snappy.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/COPYING b/cmd/vendor/github.com/peterh/liner/COPYING similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/COPYING rename to cmd/vendor/github.com/peterh/liner/COPYING diff --git a/Godeps/_workspace/src/github.com/peterh/liner/README.md b/cmd/vendor/github.com/peterh/liner/README.md similarity index 87% rename from Godeps/_workspace/src/github.com/peterh/liner/README.md rename to cmd/vendor/github.com/peterh/liner/README.md index 99027c6..9148b24 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/README.md +++ b/cmd/vendor/github.com/peterh/liner/README.md @@ -21,8 +21,8 @@ Ctrl-A, Home | Move cursor to beginning of line Ctrl-E, End | Move cursor to end of line Ctrl-B, Left | Move cursor one character left Ctrl-F, Right| Move cursor one character right -Ctrl-Left | Move cursor to previous word -Ctrl-Right | Move cursor to next word +Ctrl-Left, Alt-B | Move cursor to previous word +Ctrl-Right, Alt-F | Move cursor to next word Ctrl-D, Del | (if line is *not* empty) Delete character under cursor Ctrl-D | (if line *is* empty) End of File - usually quits application Ctrl-C | Reset input (create new empty prompt) @@ -48,13 +48,14 @@ package main import ( "log" "os" + "path/filepath" "strings" "github.com/peterh/liner" ) var ( - history_fn = "/tmp/.liner_history" + history_fn = filepath.Join(os.TempDir(), ".liner_example_history") names = 
[]string{"john", "james", "mary", "nancy"} ) @@ -62,6 +63,8 @@ func main() { line := liner.NewLiner() defer line.Close() + line.SetCtrlCAborts(true) + line.SetCompleter(func(line string) (c []string) { for _, n := range names { if strings.HasPrefix(n, strings.ToLower(line)) { @@ -76,11 +79,13 @@ func main() { f.Close() } - if name, err := line.Prompt("What is your name? "); err != nil { - log.Print("Error reading line: ", err) - } else { + if name, err := line.Prompt("What is your name? "); err == nil { log.Print("Got: ", name) line.AppendHistory(name) + } else if err == liner.ErrPromptAborted { + log.Print("Aborted") + } else { + log.Print("Error reading line: ", err) } if f, err := os.Create(history_fn); err != nil { diff --git a/Godeps/_workspace/src/github.com/peterh/liner/bsdinput.go b/cmd/vendor/github.com/peterh/liner/bsdinput.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/bsdinput.go rename to cmd/vendor/github.com/peterh/liner/bsdinput.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/common.go b/cmd/vendor/github.com/peterh/liner/common.go similarity index 95% rename from Godeps/_workspace/src/github.com/peterh/liner/common.go rename to cmd/vendor/github.com/peterh/liner/common.go index 9424abe..5f9181a 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/common.go +++ b/cmd/vendor/github.com/peterh/liner/common.go @@ -29,6 +29,9 @@ type commonState struct { ctrlCAborts bool r *bufio.Reader tabStyle TabStyle + multiLineMode bool + cursorRows int + maxRows int } // TabStyle is used to select how tab completions are displayed. @@ -174,7 +177,7 @@ func (s *State) SetCompleter(f Completer) { return } s.completer = func(line string, pos int) (string, []string, string) { - return "", f(line[:pos]), line[pos:] + return "", f(string([]rune(line)[:pos])), string([]rune(line)[pos:]) } } @@ -207,6 +210,11 @@ func (s *State) SetCtrlCAborts(aborts bool) { s.ctrlCAborts = aborts } +// SetMultiLineMode sets whether line is auto-wrapped. The default is false (single line). 
+func (s *State) SetMultiLineMode(mlmode bool) { + s.multiLineMode = mlmode +} + func (s *State) promptUnsupported(p string) (string, error) { if !s.inputRedirected || !s.terminalSupported { fmt.Print(p) diff --git a/Godeps/_workspace/src/github.com/peterh/liner/fallbackinput.go b/cmd/vendor/github.com/peterh/liner/fallbackinput.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/fallbackinput.go rename to cmd/vendor/github.com/peterh/liner/fallbackinput.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/input.go b/cmd/vendor/github.com/peterh/liner/input.go similarity index 97% rename from Godeps/_workspace/src/github.com/peterh/liner/input.go rename to cmd/vendor/github.com/peterh/liner/input.go index 94a8215..c80c85f 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/input.go +++ b/cmd/vendor/github.com/peterh/liner/input.go @@ -329,6 +329,12 @@ func (s *State) readNext() (interface{}, error) { default: return unknown, nil } + case 'b': + s.pending = s.pending[:0] // escape code complete + return altB, nil + case 'f': + s.pending = s.pending[:0] // escape code complete + return altF, nil case 'y': s.pending = s.pending[:0] // escape code complete return altY, nil diff --git a/Godeps/_workspace/src/github.com/peterh/liner/input_darwin.go b/cmd/vendor/github.com/peterh/liner/input_darwin.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/input_darwin.go rename to cmd/vendor/github.com/peterh/liner/input_darwin.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/input_linux.go b/cmd/vendor/github.com/peterh/liner/input_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/input_linux.go rename to cmd/vendor/github.com/peterh/liner/input_linux.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/input_windows.go b/cmd/vendor/github.com/peterh/liner/input_windows.go similarity index 95% rename from Godeps/_workspace/src/github.com/peterh/liner/input_windows.go rename to cmd/vendor/github.com/peterh/liner/input_windows.go index cc98719..9dcc311 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/input_windows.go +++ b/cmd/vendor/github.com/peterh/liner/input_windows.go @@ -132,6 +132,8 @@ const ( vk_f10 = 0x79 vk_f11 = 0x7a vk_f12 = 0x7b + bKey = 0x42 + fKey = 0x46 yKey = 0x59 ) @@ -178,6 +180,12 @@ func (s *State) readNext() (interface{}, error) { if ke.VirtualKeyCode == vk_tab && ke.ControlKeyState&modKeys == shiftPressed { s.key = shiftTab + } else if ke.VirtualKeyCode == bKey && (ke.ControlKeyState&modKeys == leftAltPressed || + ke.ControlKeyState&modKeys == rightAltPressed) { + s.key = altB + } else if ke.VirtualKeyCode == fKey && (ke.ControlKeyState&modKeys == leftAltPressed || + ke.ControlKeyState&modKeys == rightAltPressed) { + s.key = altF } else if ke.VirtualKeyCode == yKey && (ke.ControlKeyState&modKeys == leftAltPressed || ke.ControlKeyState&modKeys == rightAltPressed) { s.key = altY diff --git a/Godeps/_workspace/src/github.com/peterh/liner/line.go b/cmd/vendor/github.com/peterh/liner/line.go similarity index 81% rename from Godeps/_workspace/src/github.com/peterh/liner/line.go rename to cmd/vendor/github.com/peterh/liner/line.go index 87ce693..32f9028 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/line.go +++ b/cmd/vendor/github.com/peterh/liner/line.go @@ -37,6 +37,8 @@ const ( f10 f11 f12 + altB + altF altY shiftTab wordLeft @@ -88,6 +90,14 @@ const ( ) func (s *State) refresh(prompt []rune, buf []rune, pos int) error { + if 
s.multiLineMode { + return s.refreshMultiLine(prompt, buf, pos) + } else { + return s.refreshSingleLine(prompt, buf, pos) + } +} + +func (s *State) refreshSingleLine(prompt []rune, buf []rune, pos int) error { s.cursorPos(0) _, err := fmt.Print(string(prompt)) if err != nil { @@ -143,6 +153,82 @@ func (s *State) refresh(prompt []rune, buf []rune, pos int) error { return err } +func (s *State) refreshMultiLine(prompt []rune, buf []rune, pos int) error { + promptColumns := countMultiLineGlyphs(prompt, s.columns, 0) + totalColumns := countMultiLineGlyphs(buf, s.columns, promptColumns) + totalRows := (totalColumns + s.columns - 1) / s.columns + maxRows := s.maxRows + if totalRows > s.maxRows { + s.maxRows = totalRows + } + cursorRows := s.cursorRows + if cursorRows == 0 { + cursorRows = 1 + } + + /* First step: clear all the lines used before. To do so start by + * going to the last row. */ + if maxRows-cursorRows > 0 { + s.moveDown(maxRows - cursorRows) + } + + /* Now for every row clear it, go up. */ + for i := 0; i < maxRows-1; i++ { + s.cursorPos(0) + s.eraseLine() + s.moveUp(1) + } + + /* Clean the top line. */ + s.cursorPos(0) + s.eraseLine() + + /* Write the prompt and the current buffer content */ + if _, err := fmt.Print(string(prompt)); err != nil { + return err + } + if _, err := fmt.Print(string(buf)); err != nil { + return err + } + + /* If we are at the very end of the screen with our prompt, we need to + * emit a newline and move the prompt to the first column. */ + cursorColumns := countMultiLineGlyphs(buf[:pos], s.columns, promptColumns) + if cursorColumns == totalColumns && totalColumns%s.columns == 0 { + s.emitNewLine() + s.cursorPos(0) + totalRows++ + if totalRows > s.maxRows { + s.maxRows = totalRows + } + } + + /* Move cursor to right position. */ + cursorRows = (cursorColumns + s.columns) / s.columns + if s.cursorRows > 0 && totalRows-cursorRows > 0 { + s.moveUp(totalRows - cursorRows) + } + /* Set column. */ + s.cursorPos(cursorColumns % s.columns) + + s.cursorRows = cursorRows + return nil +} + +func (s *State) resetMultiLine(prompt []rune, buf []rune, pos int) { + columns := countMultiLineGlyphs(prompt, s.columns, 0) + columns = countMultiLineGlyphs(buf[:pos], s.columns, columns) + columns += 2 // ^C + cursorRows := (columns + s.columns) / s.columns + if s.maxRows-cursorRows > 0 { + for i := 0; i < s.maxRows-cursorRows; i++ { + fmt.Println() // always moves the cursor down or scrolls the window up as needed + } + } + s.maxRows = 1 + s.cursorRows = 0 +} + func longestCommonPrefix(strs []string) string { if len(strs) == 0 { return "" @@ -179,6 +265,29 @@ func (s *State) circularTabs(items []string) func(tabDirection) (string, error) } } +func calculateColumns(screenWidth int, items []string) (numColumns, numRows, maxWidth int) { + for _, item := range items { + if len(item) >= screenWidth { + return 1, len(items), screenWidth - 1 + } + if len(item) >= maxWidth { + maxWidth = len(item) + 1 + } + } + + numColumns = screenWidth / maxWidth + numRows = len(items) / numColumns + if len(items)%numColumns > 0 { + numRows++ + } + + if len(items) <= numColumns { + maxWidth = 0 + } + + return +} + func (s *State) printedTabs(items []string) func(tabDirection) (string, error) { numTabs := 1 prefix := longestCommonPrefix(items) @@ -190,6 +299,7 @@ func (s *State) printedTabs(items []string) func(tabDirection) (string, error) { if numTabs == 2 { if len(items) > 100 { fmt.Printf("\nDisplay all %d possibilities? 
(y or n) ", len(items)) + prompt: for { next, err := s.readNext() if err != nil { @@ -197,36 +307,26 @@ func (s *State) printedTabs(items []string) func(tabDirection) (string, error) { } if key, ok := next.(rune); ok { - if unicode.ToLower(key) == 'n' { + switch key { + case 'n', 'N': return prefix, nil - } else if unicode.ToLower(key) == 'y' { - break + case 'y', 'Y': + break prompt + case ctrlC, ctrlD, cr, lf: + s.restartPrompt() } } } } fmt.Println("") - maxWidth := 0 - for _, item := range items { - if len(item) >= maxWidth { - maxWidth = len(item) + 1 - } - } - numColumns := s.columns / maxWidth - numRows := len(items) / numColumns - if len(items)%numColumns > 0 { - numRows++ - } + numColumns, numRows, maxWidth := calculateColumns(s.columns, items) - if len(items) <= numColumns { - maxWidth = 0 - } for i := 0; i < numRows; i++ { for j := 0; j < numColumns*numRows; j += numRows { if i+j < len(items) { if maxWidth > 0 { - fmt.Printf("%-*s", maxWidth, items[i+j]) + fmt.Printf("%-*.[1]*s", maxWidth, items[i+j]) } else { fmt.Printf("%v ", items[i+j]) } @@ -500,6 +600,9 @@ mainLoop: case rune: switch v { case cr, lf: + if s.multiLineMode { + s.resetMultiLine(p, line, pos) + } fmt.Println() break mainLoop case ctrlA: // Start of line @@ -601,6 +704,9 @@ mainLoop: s.refresh(p, line, pos) case ctrlC: // reset fmt.Println("^C") + if s.multiLineMode { + s.resetMultiLine(p, line, pos) + } if s.ctrlCAborts { return "", ErrPromptAborted } @@ -685,7 +791,7 @@ mainLoop: case 0, 28, 29, 30, 31: fmt.Print(beep) default: - if pos == len(line) && len(p)+len(line) < s.columns-1 { + if pos == len(line) && !s.multiLineMode && countGlyphs(p)+countGlyphs(line) < s.columns-1 { line = append(line, v) fmt.Printf("%c", v) pos++ @@ -710,11 +816,21 @@ mainLoop: } else { fmt.Print(beep) } - case wordLeft: + case wordLeft, altB: if pos > 0 { + var spaceHere, spaceLeft, leftKnown bool for { pos-- - if pos == 0 || unicode.IsSpace(line[pos-1]) { + if pos == 0 { + break + } + if leftKnown { + spaceHere = spaceLeft + } else { + spaceHere = unicode.IsSpace(line[pos]) + } + spaceLeft, leftKnown = unicode.IsSpace(line[pos-1]), true + if !spaceHere && spaceLeft { break } } @@ -727,11 +843,21 @@ mainLoop: } else { fmt.Print(beep) } - case wordRight: + case wordRight, altF: if pos < len(line) { + var spaceHere, spaceLeft, hereKnown bool for { pos++ - if pos == len(line) || unicode.IsSpace(line[pos]) { + if pos == len(line) { + break + } + if hereKnown { + spaceLeft = spaceHere + } else { + spaceLeft = unicode.IsSpace(line[pos-1]) + } + spaceHere, hereKnown = unicode.IsSpace(line[pos]), true + if spaceHere && !spaceLeft { break } } @@ -767,6 +893,19 @@ mainLoop: pos = 0 case end: // End of line pos = len(line) + case winch: // Window change + if s.multiLineMode { + if s.maxRows-s.cursorRows > 0 { + s.moveDown(s.maxRows - s.cursorRows) + } + for i := 0; i < s.maxRows-1; i++ { + s.cursorPos(0) + s.eraseLine() + s.moveUp(1) + } + s.maxRows = 1 + s.cursorRows = 1 + } } s.refresh(p, line, pos) } @@ -814,6 +953,9 @@ mainLoop: case rune: switch v { case cr, lf: + if s.multiLineMode { + s.resetMultiLine(p, line, pos) + } fmt.Println() break mainLoop case ctrlD: // del @@ -838,6 +980,9 @@ mainLoop: } case ctrlC: fmt.Println("^C") + if s.multiLineMode { + s.resetMultiLine(p, line, pos) + } if s.ctrlCAborts { return "", ErrPromptAborted } diff --git a/Godeps/_workspace/src/github.com/peterh/liner/output.go b/cmd/vendor/github.com/peterh/liner/output.go similarity index 86% rename from Godeps/_workspace/src/github.com/peterh/liner/output.go 
rename to cmd/vendor/github.com/peterh/liner/output.go index e91f4ea..049273b 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/output.go +++ b/cmd/vendor/github.com/peterh/liner/output.go @@ -31,6 +31,18 @@ func (s *State) eraseScreen() { fmt.Print("\x1b[H\x1b[2J") } +func (s *State) moveUp(lines int) { + fmt.Printf("\x1b[%dA", lines) +} + +func (s *State) moveDown(lines int) { + fmt.Printf("\x1b[%dB", lines) +} + +func (s *State) emitNewLine() { + fmt.Print("\n") +} + type winSize struct { row, col uint16 xpixel, ypixel uint16 diff --git a/Godeps/_workspace/src/github.com/peterh/liner/output_windows.go b/cmd/vendor/github.com/peterh/liner/output_windows.go similarity index 69% rename from Godeps/_workspace/src/github.com/peterh/liner/output_windows.go rename to cmd/vendor/github.com/peterh/liner/output_windows.go index 27ae55a..45cd978 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/output_windows.go +++ b/cmd/vendor/github.com/peterh/liner/output_windows.go @@ -47,6 +47,24 @@ func (s *State) eraseScreen() { procSetConsoleCursorPosition.Call(uintptr(s.hOut), 0) } +func (s *State) moveUp(lines int) { + var sbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi))) + procSetConsoleCursorPosition.Call(uintptr(s.hOut), + uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|(int(sbi.dwCursorPosition.y)-lines)<<16)) +} + +func (s *State) moveDown(lines int) { + var sbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi))) + procSetConsoleCursorPosition.Call(uintptr(s.hOut), + uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|(int(sbi.dwCursorPosition.y)+lines)<<16)) +} + +func (s *State) emitNewLine() { + // windows doesn't need to emit a new line +} + func (s *State) getColumns() { var sbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi))) diff --git a/Godeps/_workspace/src/github.com/peterh/liner/signal.go b/cmd/vendor/github.com/peterh/liner/signal.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/signal.go rename to cmd/vendor/github.com/peterh/liner/signal.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/signal_legacy.go b/cmd/vendor/github.com/peterh/liner/signal_legacy.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/signal_legacy.go rename to cmd/vendor/github.com/peterh/liner/signal_legacy.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/unixmode.go b/cmd/vendor/github.com/peterh/liner/unixmode.go similarity index 100% rename from Godeps/_workspace/src/github.com/peterh/liner/unixmode.go rename to cmd/vendor/github.com/peterh/liner/unixmode.go diff --git a/Godeps/_workspace/src/github.com/peterh/liner/width.go b/cmd/vendor/github.com/peterh/liner/width.go similarity index 75% rename from Godeps/_workspace/src/github.com/peterh/liner/width.go rename to cmd/vendor/github.com/peterh/liner/width.go index 5c6bf68..d8984aa 100644 --- a/Godeps/_workspace/src/github.com/peterh/liner/width.go +++ b/cmd/vendor/github.com/peterh/liner/width.go @@ -36,6 +36,25 @@ func countGlyphs(s []rune) int { return n } +func countMultiLineGlyphs(s []rune, columns int, start int) int { + n := start + for _, r := range s { + switch { + case unicode.IsOneOf(zeroWidth, r): + case unicode.IsOneOf(doubleWidth, r): + n += 2 + // no room for a 2-glyphs-wide char in the ending + // so skip a column and display it at the beginning + if n%columns == 1 { + n++
} + default: + n++ + } + } + return n +} + func getPrefixGlyphs(s []rune, num int) []rune { p := 0 for n := 0; n < num && p < len(s); p++ { diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE b/cmd/vendor/github.com/siddontang/go/LICENSE similarity index 97% rename from Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE rename to cmd/vendor/github.com/siddontang/go/LICENSE index 004e77f..80511a0 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE +++ b/cmd/vendor/github.com/siddontang/go/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013 Ben Johnson +Copyright (c) 2014 siddontang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/Godeps/_workspace/src/github.com/siddontang/go/bson/LICENSE b/cmd/vendor/github.com/siddontang/go/bson/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/bson/LICENSE rename to cmd/vendor/github.com/siddontang/go/bson/LICENSE diff --git a/Godeps/_workspace/src/github.com/siddontang/go/bson/bson.go b/cmd/vendor/github.com/siddontang/go/bson/bson.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/bson/bson.go rename to cmd/vendor/github.com/siddontang/go/bson/bson.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/bson/decode.go b/cmd/vendor/github.com/siddontang/go/bson/decode.go similarity index 99% rename from Godeps/_workspace/src/github.com/siddontang/go/bson/decode.go rename to cmd/vendor/github.com/siddontang/go/bson/decode.go index f1c8b4f..fc991b7 100644 --- a/Godeps/_workspace/src/github.com/siddontang/go/bson/decode.go +++ b/cmd/vendor/github.com/siddontang/go/bson/decode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE diff --git a/Godeps/_workspace/src/github.com/siddontang/go/bson/encode.go b/cmd/vendor/github.com/siddontang/go/bson/encode.go similarity index 97% rename from Godeps/_workspace/src/github.com/siddontang/go/bson/encode.go rename to cmd/vendor/github.com/siddontang/go/bson/encode.go index 6544748..036a136 100644 --- a/Godeps/_workspace/src/github.com/siddontang/go/bson/encode.go +++ b/cmd/vendor/github.com/siddontang/go/bson/encode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -182,7 +182,7 @@ func isZero(v reflect.Value) bool { if v.Type() == typeTime { return v.Interface().(time.Time).IsZero() } - for i := v.NumField()-1; i >= 0; i-- { + for i := v.NumField() - 1; i >= 0; i-- { if !isZero(v.Field(i)) { return false } @@ -207,7 +207,7 @@ func (e *encoder) addSlice(v reflect.Value) { return } l := v.Len() - et := v.Type().Elem() + et := v.Type().Elem() if et == typeDocElem { for i := 0; i < l; i++ { elem := v.Index(i).Interface().(DocElem) @@ -401,7 +401,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { case time.Time: // MongoDB handles timestamps as milliseconds. e.addElemName('\x09', name) - e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6)) + e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) case url.URL: e.addElemName('\x02', name) diff --git a/Godeps/_workspace/src/github.com/siddontang/go/filelock/LICENSE b/cmd/vendor/github.com/siddontang/go/filelock/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/filelock/LICENSE rename to cmd/vendor/github.com/siddontang/go/filelock/LICENSE diff --git a/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_generic.go b/cmd/vendor/github.com/siddontang/go/filelock/file_lock_generic.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_generic.go rename to cmd/vendor/github.com/siddontang/go/filelock/file_lock_generic.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_solaris.go b/cmd/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_solaris.go rename to cmd/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_unix.go b/cmd/vendor/github.com/siddontang/go/filelock/file_lock_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_unix.go rename to cmd/vendor/github.com/siddontang/go/filelock/file_lock_unix.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_windows.go b/cmd/vendor/github.com/siddontang/go/filelock/file_lock_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/filelock/file_lock_windows.go rename to cmd/vendor/github.com/siddontang/go/filelock/file_lock_windows.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/hack/hack.go b/cmd/vendor/github.com/siddontang/go/hack/hack.go 
similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/hack/hack.go rename to cmd/vendor/github.com/siddontang/go/hack/hack.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/ioutil2/ioutil.go b/cmd/vendor/github.com/siddontang/go/ioutil2/ioutil.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/ioutil2/ioutil.go rename to cmd/vendor/github.com/siddontang/go/ioutil2/ioutil.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/ioutil2/sectionwriter.go b/cmd/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/ioutil2/sectionwriter.go rename to cmd/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/log/doc.go b/cmd/vendor/github.com/siddontang/go/log/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/log/doc.go rename to cmd/vendor/github.com/siddontang/go/log/doc.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/log/filehandler.go b/cmd/vendor/github.com/siddontang/go/log/filehandler.go similarity index 98% rename from Godeps/_workspace/src/github.com/siddontang/go/log/filehandler.go rename to cmd/vendor/github.com/siddontang/go/log/filehandler.go index 783b652..308896a 100644 --- a/Godeps/_workspace/src/github.com/siddontang/go/log/filehandler.go +++ b/cmd/vendor/github.com/siddontang/go/log/filehandler.go @@ -36,7 +36,7 @@ func (h *FileHandler) Close() error { return h.fd.Close() } -//RotatingFileHandler writes log a file, if file size exceeds maxBytes, +//RotatingFileHandler writes log a file, if file size exceeds maxBytes, //it will backup current file and open a new one. // //max backup file number is set by backupCount, it will delete oldest if backups too many. @@ -112,7 +112,7 @@ func (h *RotatingFileHandler) doRollover() { } } -//TimeRotatingFileHandler writes log to a file, +//TimeRotatingFileHandler writes log to a file, //it will backup current and open a new one, with a period time you sepecified. // //refer: http://docs.python.org/2/library/logging.handlers.html. diff --git a/Godeps/_workspace/src/github.com/siddontang/go/log/handler.go b/cmd/vendor/github.com/siddontang/go/log/handler.go similarity index 93% rename from Godeps/_workspace/src/github.com/siddontang/go/log/handler.go rename to cmd/vendor/github.com/siddontang/go/log/handler.go index 352e30c..4dc086f 100644 --- a/Godeps/_workspace/src/github.com/siddontang/go/log/handler.go +++ b/cmd/vendor/github.com/siddontang/go/log/handler.go @@ -31,8 +31,7 @@ func (h *StreamHandler) Close() error { return nil } - -//NullHandler does nothing, it discards anything. +//NullHandler does nothing, it discards anything. 
type NullHandler struct { } diff --git a/Godeps/_workspace/src/github.com/siddontang/go/log/log.go b/cmd/vendor/github.com/siddontang/go/log/log.go similarity index 91% rename from Godeps/_workspace/src/github.com/siddontang/go/log/log.go rename to cmd/vendor/github.com/siddontang/go/log/log.go index 74cd76a..f2e5ef2 100644 --- a/Godeps/_workspace/src/github.com/siddontang/go/log/log.go +++ b/cmd/vendor/github.com/siddontang/go/log/log.go @@ -5,6 +5,7 @@ import ( "os" "runtime" "strconv" + "strings" "sync" "sync/atomic" "time" @@ -155,6 +156,25 @@ func (l *Logger) SetLevel(level int) { l.level.Set(level) } +// name can be in ["trace", "debug", "info", "warn", "error", "fatal"] +func (l *Logger) SetLevelByName(name string) { + name = strings.ToLower(name) + switch name { + case "trace": + l.SetLevel(LevelTrace) + case "debug": + l.SetLevel(LevelDebug) + case "info": + l.SetLevel(LevelInfo) + case "warn": + l.SetLevel(LevelWarn) + case "error": + l.SetLevel(LevelError) + case "fatal": + l.SetLevel(LevelFatal) + } +} + func (l *Logger) SetHandler(h Handler) { if l.closed.Get() == 1 { return @@ -288,6 +308,11 @@ func SetLevel(level int) { std.SetLevel(level) } +// name can be in ["trace", "debug", "info", "warn", "error", "fatal"] +func SetLevelByName(name string) { + std.SetLevelByName(name) +} + func SetHandler(h Handler) { std.SetHandler(h) } diff --git a/Godeps/_workspace/src/github.com/siddontang/go/log/sockethandler.go b/cmd/vendor/github.com/siddontang/go/log/sockethandler.go similarity index 95% rename from Godeps/_workspace/src/github.com/siddontang/go/log/sockethandler.go rename to cmd/vendor/github.com/siddontang/go/log/sockethandler.go index ad81ccd..3e7494d 100644 --- a/Godeps/_workspace/src/github.com/siddontang/go/log/sockethandler.go +++ b/cmd/vendor/github.com/siddontang/go/log/sockethandler.go @@ -7,8 +7,8 @@ import ( ) //SocketHandler writes log to a connectionl. -//Network protocol is simple: log length + log | log length + log. log length is uint32, bigendian. -//you must implement your own log server, maybe you can use logd instead simply. +//Network protocol is simple: log length + log | log length + log. log length is uint32, bigendian. +//you must implement your own log server, maybe you can use logd instead simply. 
type SocketHandler struct { c net.Conn protocol string diff --git a/Godeps/_workspace/src/github.com/siddontang/go/num/bytes.go b/cmd/vendor/github.com/siddontang/go/num/bytes.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/num/bytes.go rename to cmd/vendor/github.com/siddontang/go/num/bytes.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/num/cmp.go b/cmd/vendor/github.com/siddontang/go/num/cmp.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/num/cmp.go rename to cmd/vendor/github.com/siddontang/go/num/cmp.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/num/str.go b/cmd/vendor/github.com/siddontang/go/num/str.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/num/str.go rename to cmd/vendor/github.com/siddontang/go/num/str.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/snappy/LICENSE b/cmd/vendor/github.com/siddontang/go/snappy/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/snappy/LICENSE rename to cmd/vendor/github.com/siddontang/go/snappy/LICENSE diff --git a/Godeps/_workspace/src/github.com/siddontang/go/snappy/decode.go b/cmd/vendor/github.com/siddontang/go/snappy/decode.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/snappy/decode.go rename to cmd/vendor/github.com/siddontang/go/snappy/decode.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/snappy/encode.go b/cmd/vendor/github.com/siddontang/go/snappy/encode.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/snappy/encode.go rename to cmd/vendor/github.com/siddontang/go/snappy/encode.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/snappy/snappy.go b/cmd/vendor/github.com/siddontang/go/snappy/snappy.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/snappy/snappy.go rename to cmd/vendor/github.com/siddontang/go/snappy/snappy.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/sync2/atomic.go b/cmd/vendor/github.com/siddontang/go/sync2/atomic.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/sync2/atomic.go rename to cmd/vendor/github.com/siddontang/go/sync2/atomic.go diff --git a/Godeps/_workspace/src/github.com/siddontang/go/sync2/semaphore.go b/cmd/vendor/github.com/siddontang/go/sync2/semaphore.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/go/sync2/semaphore.go rename to cmd/vendor/github.com/siddontang/go/sync2/semaphore.go diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/LICENSE b/cmd/vendor/github.com/siddontang/golua/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/LICENSE rename to cmd/vendor/github.com/siddontang/golua/LICENSE diff --git a/vendor/lua/c-golua.c b/cmd/vendor/github.com/siddontang/golua/c-golua.c similarity index 90% rename from vendor/lua/c-golua.c rename to cmd/vendor/github.com/siddontang/golua/c-golua.c index 18dec08..caa7e2a 100644 --- a/vendor/lua/c-golua.c +++ b/cmd/vendor/github.com/siddontang/golua/c-golua.c @@ -1,5 +1,3 @@ -// +build lua - #include #include #include @@ -57,14 +55,15 @@ unsigned int* clua_checkgosomething(lua_State* L, int index, const char *desired } } -GoInterface* clua_getgostate(lua_State* L) +size_t clua_getgostate(lua_State* L) { + size_t gostateindex; //get gostate from registry entry 
lua_pushlightuserdata(L,(void*)&GoStateRegistryKey); lua_gettable(L, LUA_REGISTRYINDEX); - GoInterface* gip = lua_touserdata(L,-1); + gostateindex = (size_t)lua_touserdata(L,-1); lua_pop(L,1); - return gip; + return gostateindex; } @@ -73,10 +72,10 @@ int callback_function(lua_State* L) { int r; unsigned int *fid = clua_checkgosomething(L, 1, MT_GOFUNCTION); - GoInterface* gi = clua_getgostate(L); + size_t gostateindex = clua_getgostate(L); //remove the go function from the stack (to present same behavior as lua_CFunctions) lua_remove(L,1); - return golua_callgofunction(*gi, fid!=NULL ? *fid : -1); + return golua_callgofunction(gostateindex, fid!=NULL ? *fid : -1); } //wrapper for gchook @@ -84,9 +83,9 @@ int gchook_wrapper(lua_State* L) { //printf("Garbage collection wrapper\n"); unsigned int* fid = clua_checkgosomething(L, -1, NULL); - GoInterface* gi = clua_getgostate(L); + size_t gostateindex = clua_getgostate(L); if (fid != NULL) - return golua_gchook(*gi,*fid); + return golua_gchook(gostateindex,*fid); return 0; } @@ -113,8 +112,8 @@ void clua_pushgofunction(lua_State* L, unsigned int fid) static int callback_c (lua_State* L) { int fid = clua_togofunction(L,lua_upvalueindex(1)); - GoInterface *gi = clua_getgostate(L); - return golua_callgofunction(*gi,fid); + size_t gostateindex = clua_getgostate(L); + return golua_callgofunction(gostateindex,fid); } void clua_pushcallback(lua_State* L) @@ -137,14 +136,11 @@ int default_panicf(lua_State *L) abort(); } -void clua_setgostate(lua_State* L, GoInterface gi) +void clua_setgostate(lua_State* L, size_t gostateindex) { lua_atpanic(L, default_panicf); lua_pushlightuserdata(L,(void*)&GoStateRegistryKey); - GoInterface* gip = (GoInterface*)lua_newuserdata(L,sizeof(GoInterface)); - //copy interface value to userdata - gip->v = gi.v; - gip->t = gi.t; + lua_pushlightuserdata(L, (void*)gostateindex); //set into registry table lua_settable(L, LUA_REGISTRYINDEX); } @@ -166,9 +162,9 @@ int interface_index_callback(lua_State *L) return 1; } - GoInterface* gi = clua_getgostate(L); + size_t gostateindex = clua_getgostate(L); - int r = golua_interface_index_callback(*gi, *iid, field_name); + int r = golua_interface_index_callback(gostateindex, *iid, field_name); if (r < 0) { @@ -198,9 +194,9 @@ int interface_newindex_callback(lua_State *L) return 1; } - GoInterface* gi = clua_getgostate(L); + size_t gostateindex = clua_getgostate(L); - int r = golua_interface_newindex_callback(*gi, *iid, field_name); + int r = golua_interface_newindex_callback(gostateindex, *iid, field_name); if (r < 0) { @@ -215,8 +211,8 @@ int interface_newindex_callback(lua_State *L) int panic_msghandler(lua_State *L) { - GoInterface* gi = clua_getgostate(L); - go_panic_msghandler(*gi, (char *)lua_tolstring(L, -1, NULL)); + size_t gostateindex = clua_getgostate(L); + go_panic_msghandler(gostateindex, (char *)lua_tolstring(L, -1, NULL)); return 0; } @@ -277,8 +273,8 @@ int callback_panicf(lua_State* L) lua_gettable(L,LUA_REGISTRYINDEX); unsigned int fid = lua_tointeger(L,-1); lua_pop(L,1); - GoInterface* gi = clua_getgostate(L); - return golua_callpanicfunction(*gi,fid); + size_t gostateindex = clua_getgostate(L); + return golua_callpanicfunction(gostateindex,fid); } @@ -396,6 +392,7 @@ void clua_setexecutionlimit(lua_State* L, int n) lua_sethook(L, &clua_hook_function, LUA_MASKCOUNT, n); } + LUALIB_API int (luaopen_cjson) (lua_State *L); LUALIB_API int (luaopen_struct) (lua_State *L); LUALIB_API int (luaopen_cmsgpack) (lua_State *L); @@ -419,4 +416,4 @@ void clua_opencmsgpack(lua_State* 
L) lua_pushcfunction(L,&luaopen_cmsgpack); lua_pushstring(L,"cmsgpack"); lua_call(L, 1, 0); -} +} \ No newline at end of file diff --git a/cmd/vendor/github.com/siddontang/golua/doc.go b/cmd/vendor/github.com/siddontang/golua/doc.go new file mode 100644 index 0000000..07534cc --- /dev/null +++ b/cmd/vendor/github.com/siddontang/golua/doc.go @@ -0,0 +1 @@ +package lua diff --git a/vendor/lua/golua.go b/cmd/vendor/github.com/siddontang/golua/golua.go similarity index 77% rename from vendor/lua/golua.go rename to cmd/vendor/github.com/siddontang/golua/golua.go index 5cbe494..a7a74a9 100644 --- a/vendor/lua/golua.go +++ b/cmd/vendor/github.com/siddontang/golua/golua.go @@ -1,8 +1,8 @@ -// +build lua - package lua /* +#cgo CFLAGS: -Ilua + #include #include #include @@ -11,6 +11,7 @@ import "C" import ( "reflect" + "sync" "unsafe" ) @@ -26,6 +27,9 @@ type State struct { // Wrapped lua_State object s *C.lua_State + // index of this object inside the goStates array + Index uintptr + // Registry of go object that have been pushed to Lua VM registry []interface{} @@ -33,9 +37,35 @@ type State struct { freeIndices []uint } +var goStates map[uintptr]*State +var goStatesMutex sync.Mutex + +func init() { + goStates = make(map[uintptr]*State, 16) +} + +func registerGoState(L *State) { + goStatesMutex.Lock() + defer goStatesMutex.Unlock() + L.Index = uintptr(unsafe.Pointer(L)) + goStates[L.Index] = L +} + +func unregisterGoState(L *State) { + goStatesMutex.Lock() + defer goStatesMutex.Unlock() + delete(goStates, L.Index) +} + +func getGoState(gostateindex uintptr) *State { + goStatesMutex.Lock() + defer goStatesMutex.Unlock() + return goStates[gostateindex] +} + //export golua_callgofunction -func golua_callgofunction(L interface{}, fid uint) int { - L1 := L.(*State) +func golua_callgofunction(gostateindex uintptr, fid uint) int { + L1 := getGoState(gostateindex) if fid < 0 { panic(&LuaError{0, "Requested execution of an unknown function", L1.StackTrace()}) } @@ -44,8 +74,8 @@ func golua_callgofunction(L interface{}, fid uint) int { } //export golua_interface_newindex_callback -func golua_interface_newindex_callback(Li interface{}, iid uint, field_name_cstr *C.char) int { - L := Li.(*State) +func golua_interface_newindex_callback(gostateindex uintptr, iid uint, field_name_cstr *C.char) int { + L := getGoState(gostateindex) iface := L.registry[iid] ifacevalue := reflect.ValueOf(iface).Elem() @@ -129,8 +159,8 @@ func golua_interface_newindex_callback(Li interface{}, iid uint, field_name_cstr } //export golua_interface_index_callback -func golua_interface_index_callback(Li interface{}, iid uint, field_name *C.char) int { - L := Li.(*State) +func golua_interface_index_callback(gostateindex uintptr, iid uint, field_name *C.char) int { + L := getGoState(gostateindex) iface := L.registry[iid] ifacevalue := reflect.ValueOf(iface).Elem() @@ -185,15 +215,15 @@ func golua_interface_index_callback(Li interface{}, iid uint, field_name *C.char } //export golua_gchook -func golua_gchook(L interface{}, id uint) int { - L1 := L.(*State) +func golua_gchook(gostateindex uintptr, id uint) int { + L1 := getGoState(gostateindex) L1.unregister(id) return 0 } //export golua_callpanicfunction -func golua_callpanicfunction(L interface{}, id uint) int { - L1 := L.(*State) +func golua_callpanicfunction(gostateindex uintptr, id uint) int { + L1 := getGoState(gostateindex) f := L1.registry[id].(LuaGoFunction) return f(L1) } @@ -214,8 +244,8 @@ func golua_callallocf(fp uintptr, ptr uintptr, osize uint, nsize uint) uintptr { } //export 
go_panic_msghandler -func go_panic_msghandler(Li interface{}, z *C.char) { - L := Li.(*State) +func go_panic_msghandler(gostateindex uintptr, z *C.char) { + L := getGoState(gostateindex) s := C.GoString(z) panic(&LuaError{LUA_ERRERR, s, L.StackTrace()}) diff --git a/vendor/lua/golua.h b/cmd/vendor/github.com/siddontang/golua/golua.h similarity index 88% rename from vendor/lua/golua.h rename to cmd/vendor/github.com/siddontang/golua/golua.h index 5d5b989..01c043d 100644 --- a/vendor/lua/golua.h +++ b/cmd/vendor/github.com/siddontang/golua/golua.h @@ -1,5 +1,3 @@ -// +build lua - #include typedef struct { void *t; void *v; } GoInterface; @@ -15,8 +13,8 @@ unsigned int clua_togostruct(lua_State *L, int index); void clua_pushcallback(lua_State* L); void clua_pushgofunction(lua_State* L, unsigned int fid); void clua_pushgostruct(lua_State *L, unsigned int fid); -void clua_setgostate(lua_State* L, GoInterface gostate); -GoInterface* clua_getgostate(lua_State* L); +void clua_setgostate(lua_State* L, size_t gostateindex); +size_t clua_getgostate(lua_State* L); GoInterface clua_atpanic(lua_State* L, unsigned int panicf_id); int clua_callluacfunc(lua_State* L, lua_CFunction f); lua_State* clua_newstate(void* goallocf); @@ -36,4 +34,4 @@ int clua_isgostruct(lua_State *L, int n); void clua_opencjson(lua_State* L); void clua_openstruct(lua_State* L); -void clua_opencmsgpack(lua_State* L); +void clua_opencmsgpack(lua_State* L); \ No newline at end of file diff --git a/vendor/lua/golua_license b/cmd/vendor/github.com/siddontang/golua/golua_license similarity index 100% rename from vendor/lua/golua_license rename to cmd/vendor/github.com/siddontang/golua/golua_license diff --git a/vendor/lua/lauxlib.go b/cmd/vendor/github.com/siddontang/golua/lauxlib.go similarity index 96% rename from vendor/lua/lauxlib.go rename to cmd/vendor/github.com/siddontang/golua/lauxlib.go index 4ce3b7c..0ba5984 100644 --- a/vendor/lua/lauxlib.go +++ b/cmd/vendor/github.com/siddontang/golua/lauxlib.go @@ -1,5 +1,3 @@ -// +build lua - package lua //#include @@ -29,8 +27,9 @@ func (err *LuaError) StackTrace() []LuaStackEntry { } // luaL_argcheck -func (L *State) ArgCheck(cond bool, narg int, extramsg string) { - if cond { +// WARNING: before b30b2c62c6712c6683a9d22ff0abfa54c8267863 the function ArgCheck had the opposite behaviour +func (L *State) Argcheck(cond bool, narg int, extramsg string) { + if !cond { Cextramsg := C.CString(extramsg) defer C.free(unsafe.Pointer(Cextramsg)) C.luaL_argerror(L.s, C.int(narg), Cextramsg) diff --git a/vendor/lua/lua.go b/cmd/vendor/github.com/siddontang/golua/lua.go similarity index 95% rename from vendor/lua/lua.go rename to cmd/vendor/github.com/siddontang/golua/lua.go index 6a5ef65..348ebce 100644 --- a/vendor/lua/lua.go +++ b/cmd/vendor/github.com/siddontang/golua/lua.go @@ -1,5 +1,3 @@ -// +build lua - // This package provides access to the excellent lua language interpreter from go code. // // Access to most of the functions in lua.h and lauxlib.h is provided as well as additional convenience functions to publish Go objects and functions to lua code. 
@@ -11,7 +9,7 @@ package lua #cgo CFLAGS: -Ilua #cgo llua LDFLAGS: -llua #cgo luaa LDFLAGS: -llua -lm -ldl -#cgo linux,!llua,!luaa LDFLAGS: -llua5.1 -lm +#cgo linux,!llua,!luaa LDFLAGS: -llua5.1 #cgo darwin,!luaa pkg-config: lua5.1 #cgo freebsd,!luaa LDFLAGS: -llua-5.1 @@ -34,12 +32,9 @@ type LuaStackEntry struct { } func newState(L *C.lua_State) *State { - var newstatei interface{} - newstate := &State{L, make([]interface{}, 0, 8), make([]uint, 0, 8)} - newstatei = newstate - ns1 := unsafe.Pointer(&newstatei) - ns2 := (*C.GoInterface)(ns1) - C.clua_setgostate(L, *ns2) //hacky.... + newstate := &State{L, 0, make([]interface{}, 0, 8), make([]uint, 0, 8)} + registerGoState(newstate) + C.clua_setgostate(L, C.size_t(newstate.Index)) C.clua_initstate(L) return newstate } @@ -106,6 +101,14 @@ func (L *State) PushGoFunction(f LuaGoFunction) { C.clua_pushgofunction(L.s, C.uint(fid)) } +// PushGoClosure pushes a lua.LuaGoFunction to the stack wrapped in a Closure. +// this permits the go function to reflect lua type 'function' when checking with type() +// this implements behaviour akin to lua_pushcfunction() in lua C API. +func (L *State) PushGoClosure(f LuaGoFunction) { + L.PushGoFunction(f) // leaves Go function userdata on stack + C.clua_pushcallback(L.s) // wraps the userdata object with a closure making it into a function +} + // Sets a metamethod to execute a go function // // The code: @@ -221,6 +224,7 @@ func (L *State) CheckStack(extra int) bool { // lua_close func (L *State) Close() { C.lua_close(L.s) + unregisterGoState(L) } // lua_concat @@ -343,7 +347,7 @@ func (L *State) NewThread() *State { //TODO: should have same lists as parent // but may complicate gc s := C.lua_newthread(L.s) - return &State{s, nil, nil} + return &State{s, 0, nil, nil} } // lua_next diff --git a/vendor/lua/lua_cjson.c b/cmd/vendor/github.com/siddontang/golua/lua_cjson.c similarity index 99% rename from vendor/lua/lua_cjson.c rename to cmd/vendor/github.com/siddontang/golua/lua_cjson.c index cd3c19f..d71ceb5 100644 --- a/vendor/lua/lua_cjson.c +++ b/cmd/vendor/github.com/siddontang/golua/lua_cjson.c @@ -1,5 +1,3 @@ -// +build lua - #define VERSION "1.0.3" /* CJSON - JSON support for Lua diff --git a/vendor/lua/lua_cmsgpack.c b/cmd/vendor/github.com/siddontang/golua/lua_cmsgpack.c similarity index 99% rename from vendor/lua/lua_cmsgpack.c rename to cmd/vendor/github.com/siddontang/golua/lua_cmsgpack.c index 1589f16..0018640 100644 --- a/vendor/lua/lua_cmsgpack.c +++ b/cmd/vendor/github.com/siddontang/golua/lua_cmsgpack.c @@ -1,5 +1,3 @@ -// +build lua - #include #include #include diff --git a/vendor/lua/lua_defs.go b/cmd/vendor/github.com/siddontang/golua/lua_defs.go similarity index 97% rename from vendor/lua/lua_defs.go rename to cmd/vendor/github.com/siddontang/golua/lua_defs.go index 2f1c288..008f189 100644 --- a/vendor/lua/lua_defs.go +++ b/cmd/vendor/github.com/siddontang/golua/lua_defs.go @@ -1,11 +1,9 @@ -// +build lua - package lua /* #include -#include -#include + #include + #include */ import "C" diff --git a/vendor/lua/lua_struct.c b/cmd/vendor/github.com/siddontang/golua/lua_struct.c similarity index 99% rename from vendor/lua/lua_struct.c rename to cmd/vendor/github.com/siddontang/golua/lua_struct.c index 9d391a0..3c31531 100644 --- a/vendor/lua/lua_struct.c +++ b/cmd/vendor/github.com/siddontang/golua/lua_struct.c @@ -1,5 +1,3 @@ -// +build lua - /* ** {====================================================== ** Library for packing/unpacking structures. 
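
Note on the golua changes above: the clua_setgostate/clua_getgostate rework stops storing a GoInterface value inside the Lua registry and instead stores an integer handle, which the exported Go callbacks (golua_callgofunction, golua_gchook, and friends) resolve back to a *State through a mutex-guarded map. Only an integer ever crosses the cgo boundary, so C-managed memory never holds a Go pointer. Below is a minimal, self-contained sketch of that registry pattern; the function names mirror the vendored code, but the index here is a plain counter rather than uintptr(unsafe.Pointer(L)) as in the patch, and all cgo plumbing is omitted.

package main

import (
	"fmt"
	"sync"
)

// State stands in for lua.State; Index is the integer handle that gets
// stored in the Lua registry instead of a Go pointer.
type State struct {
	Index uintptr
}

var (
	goStates   = make(map[uintptr]*State, 16)
	goStatesMu sync.Mutex
	nextIndex  uintptr // sketch only: the patch derives the index from the State's address
)

func registerGoState(s *State) {
	goStatesMu.Lock()
	defer goStatesMu.Unlock()
	nextIndex++
	s.Index = nextIndex
	goStates[s.Index] = s
}

func unregisterGoState(s *State) {
	goStatesMu.Lock()
	defer goStatesMu.Unlock()
	delete(goStates, s.Index)
}

// getGoState is what the exported callbacks call with the handle they
// received back from C.
func getGoState(idx uintptr) *State {
	goStatesMu.Lock()
	defer goStatesMu.Unlock()
	return goStates[idx]
}

func main() {
	s := &State{}
	registerGoState(s)
	fmt.Println(getGoState(s.Index) == s) // true
	unregisterGoState(s)
	fmt.Println(getGoState(s.Index) == nil) // true: no dangling Go pointer left reachable from C
}

Keying the map by an integer also lets Close call unregisterGoState after lua_close without leaving anything for the C side to hold on to.
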
diff --git a/vendor/lua/strbuf.c b/cmd/vendor/github.com/siddontang/golua/strbuf.c similarity index 99% rename from vendor/lua/strbuf.c rename to cmd/vendor/github.com/siddontang/golua/strbuf.c index 7f85165..976925a 100644 --- a/vendor/lua/strbuf.c +++ b/cmd/vendor/github.com/siddontang/golua/strbuf.c @@ -1,5 +1,3 @@ -// +build lua - /* strbuf - string buffer routines * * Copyright (c) 2010-2011 Mark Pulford diff --git a/vendor/lua/strbuf.h b/cmd/vendor/github.com/siddontang/golua/strbuf.h similarity index 99% rename from vendor/lua/strbuf.h rename to cmd/vendor/github.com/siddontang/golua/strbuf.h index 3039a62..f856543 100644 --- a/vendor/lua/strbuf.h +++ b/cmd/vendor/github.com/siddontang/golua/strbuf.h @@ -1,5 +1,3 @@ -// +build lua - /* strbuf - String buffer routines * * Copyright (c) 2010-2011 Mark Pulford diff --git a/vendor/lua/LICENSE b/cmd/vendor/github.com/siddontang/goredis/LICENSE similarity index 100% rename from vendor/lua/LICENSE rename to cmd/vendor/github.com/siddontang/goredis/LICENSE diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/client.go b/cmd/vendor/github.com/siddontang/goredis/client.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/client.go rename to cmd/vendor/github.com/siddontang/goredis/client.go diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/conn.go b/cmd/vendor/github.com/siddontang/goredis/conn.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/conn.go rename to cmd/vendor/github.com/siddontang/goredis/conn.go diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/doc.go b/cmd/vendor/github.com/siddontang/goredis/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/doc.go rename to cmd/vendor/github.com/siddontang/goredis/doc.go diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/garyburd_license b/cmd/vendor/github.com/siddontang/goredis/garyburd_license similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/garyburd_license rename to cmd/vendor/github.com/siddontang/goredis/garyburd_license diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/reply.go b/cmd/vendor/github.com/siddontang/goredis/reply.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/reply.go rename to cmd/vendor/github.com/siddontang/goredis/reply.go diff --git a/Godeps/_workspace/src/github.com/siddontang/goredis/resp.go b/cmd/vendor/github.com/siddontang/goredis/resp.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/goredis/resp.go rename to cmd/vendor/github.com/siddontang/goredis/resp.go diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/LICENSE b/cmd/vendor/github.com/siddontang/rdb/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/LICENSE rename to cmd/vendor/github.com/siddontang/rdb/LICENSE diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/README.md b/cmd/vendor/github.com/siddontang/rdb/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/README.md rename to cmd/vendor/github.com/siddontang/rdb/README.md diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/decode.go b/cmd/vendor/github.com/siddontang/rdb/decode.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/decode.go rename to cmd/vendor/github.com/siddontang/rdb/decode.go diff --git 
a/Godeps/_workspace/src/github.com/siddontang/rdb/digest.go b/cmd/vendor/github.com/siddontang/rdb/digest.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/digest.go rename to cmd/vendor/github.com/siddontang/rdb/digest.go diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/encode.go b/cmd/vendor/github.com/siddontang/rdb/encode.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/encode.go rename to cmd/vendor/github.com/siddontang/rdb/encode.go diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/loader.go b/cmd/vendor/github.com/siddontang/rdb/loader.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/loader.go rename to cmd/vendor/github.com/siddontang/rdb/loader.go diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/reader.go b/cmd/vendor/github.com/siddontang/rdb/reader.go similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/reader.go rename to cmd/vendor/github.com/siddontang/rdb/reader.go diff --git a/Godeps/_workspace/src/github.com/siddontang/rdb/wandoujia-license b/cmd/vendor/github.com/siddontang/rdb/wandoujia-license similarity index 100% rename from Godeps/_workspace/src/github.com/siddontang/rdb/wandoujia-license rename to cmd/vendor/github.com/siddontang/rdb/wandoujia-license diff --git a/cmd/vendor/github.com/syndtr/goleveldb/LICENSE b/cmd/vendor/github.com/syndtr/goleveldb/LICENSE new file mode 100644 index 0000000..4a772d1 --- /dev/null +++ b/cmd/vendor/github.com/syndtr/goleveldb/LICENSE @@ -0,0 +1,24 @@ +Copyright 2012 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
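
In the leveldb/batch.go hunks that follow, besides the kType → keyType renames, decodeRec's callback gains an error return so memReplay can propagate memdb.Put failures instead of silently dropping them. For orientation, here is a hedged sketch of the record layout those hunks walk: a type byte (delete or put), a uvarint key length plus key, and, for puts only, a uvarint value length plus value. The real batch also carries a header holding the sequence number and record count (batchHdrLen), which this sketch omits, and it writes with binary.PutUvarint into a pre-grown buffer where the sketch uses the newer binary.AppendUvarint for brevity.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	keyTypeDel = 0 // delete record: type byte + key
	keyTypeVal = 1 // put record: type byte + key + value
)

// appendRec encodes one record, mirroring Batch.appendRec in shape.
func appendRec(data []byte, kt byte, key, value []byte) []byte {
	data = append(data, kt)
	data = binary.AppendUvarint(data, uint64(len(key)))
	data = append(data, key...)
	if kt == keyTypeVal {
		data = binary.AppendUvarint(data, uint64(len(value)))
		data = append(data, value...)
	}
	return data
}

// decodeRec walks the records and, like the patched version, lets the
// callback return an error that aborts the replay.
func decodeRec(data []byte, f func(kt byte, key, value []byte) error) error {
	off := 0
	for off < len(data) {
		kt := data[off]
		if kt > keyTypeVal {
			return fmt.Errorf("bad record: invalid type %d", kt)
		}
		off++
		klen, n := binary.Uvarint(data[off:])
		if n <= 0 || off+n+int(klen) > len(data) {
			return fmt.Errorf("bad record: invalid key length")
		}
		off += n
		key := data[off : off+int(klen)]
		off += int(klen)
		var value []byte
		if kt == keyTypeVal {
			vlen, m := binary.Uvarint(data[off:])
			if m <= 0 || off+m+int(vlen) > len(data) {
				return fmt.Errorf("bad record: invalid value length")
			}
			off += m
			value = data[off : off+int(vlen)]
			off += int(vlen)
		}
		if err := f(kt, key, value); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var data []byte
	data = appendRec(data, keyTypeVal, []byte("k1"), []byte("v1"))
	data = appendRec(data, keyTypeDel, []byte("k2"), nil)
	_ = decodeRec(data, func(kt byte, key, value []byte) error {
		fmt.Printf("type=%d key=%q value=%q\n", kt, key, value)
		return nil
	})
}
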
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/batch.go similarity index 80% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/batch.go index ccf390c..5010067 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/batch.go @@ -12,8 +12,10 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" ) +// ErrBatchCorrupted records reason of batch corruption. type ErrBatchCorrupted struct { Reason string } @@ -23,7 +25,7 @@ func (e *ErrBatchCorrupted) Error() string { } func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason}) + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) } const ( @@ -31,6 +33,7 @@ const ( batchGrowRec = 3000 ) +// BatchReplay wraps basic batch operations. type BatchReplay interface { Put(key, value []byte) Delete(key []byte) @@ -67,20 +70,20 @@ func (b *Batch) grow(n int) { } } -func (b *Batch) appendRec(kt kType, key, value []byte) { +func (b *Batch) appendRec(kt keyType, key, value []byte) { n := 1 + binary.MaxVarintLen32 + len(key) - if kt == ktVal { + if kt == keyTypeVal { n += binary.MaxVarintLen32 + len(value) } b.grow(n) off := len(b.data) data := b.data[:off+n] data[off] = byte(kt) - off += 1 + off++ off += binary.PutUvarint(data[off:], uint64(len(key))) copy(data[off:], key) off += len(key) - if kt == ktVal { + if kt == keyTypeVal { off += binary.PutUvarint(data[off:], uint64(len(value))) copy(data[off:], value) off += len(value) @@ -94,13 +97,13 @@ func (b *Batch) appendRec(kt kType, key, value []byte) { // Put appends 'put operation' of the given key/value pair to the batch. // It is safe to modify the contents of the argument after Put returns. func (b *Batch) Put(key, value []byte) { - b.appendRec(ktVal, key, value) + b.appendRec(keyTypeVal, key, value) } // Delete appends 'delete operation' of the given key to the batch. // It is safe to modify the contents of the argument after Delete returns. func (b *Batch) Delete(key []byte) { - b.appendRec(ktDel, key, nil) + b.appendRec(keyTypeDel, key, nil) } // Dump dumps batch contents. The returned slice can be loaded into the @@ -121,13 +124,14 @@ func (b *Batch) Load(data []byte) error { // Replay replays batch contents. func (b *Batch) Replay(r BatchReplay) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { switch kt { - case ktVal: + case keyTypeVal: r.Put(key, value) - case ktDel: + case keyTypeDel: r.Delete(key) } + return nil }) } @@ -154,6 +158,7 @@ func (b *Batch) append(p *Batch) { b.grow(len(p.data) - batchHdrLen) b.data = append(b.data, p.data[batchHdrLen:]...) 
b.rLen += p.rLen + b.bLen += p.bLen } if p.sync { b.sync = true @@ -193,18 +198,19 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error { return nil } -func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { +func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error { off := batchHdrLen for i := 0; i < b.rLen; i++ { if off >= len(b.data) { return newErrBatchCorrupted("invalid records length") } - kt := kType(b.data[off]) - if kt > ktVal { + kt := keyType(b.data[off]) + if kt > keyTypeVal { + panic(kt) return newErrBatchCorrupted("bad record: invalid type") } - off += 1 + off++ x, n := binary.Uvarint(b.data[off:]) off += n @@ -214,7 +220,7 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error key := b.data[off : off+int(x)] off += int(x) var value []byte - if kt == ktVal { + if kt == keyTypeVal { x, n := binary.Uvarint(b.data[off:]) off += n if n <= 0 || off+int(x) > len(b.data) { @@ -224,16 +230,19 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error off += int(x) } - f(i, kt, key, value) + if err := f(i, kt, key, value); err != nil { + return err + } } return nil } func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Put(ikey, value) + var ikScratch []byte + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { + ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt) + return to.Put(ikScratch, value) }) } @@ -245,8 +254,9 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er } func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Delete(ikey) + var ikScratch []byte + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { + ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt) + return to.Delete(ikScratch) }) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go similarity index 95% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go index c9670de..a287d0e 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go @@ -47,17 +47,21 @@ type Cacher interface { // so the the Release method will be called once object is released. type Value interface{} -type CacheGetter struct { +// NamespaceGetter provides convenient wrapper for namespace. +type NamespaceGetter struct { Cache *Cache NS uint64 } -func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { +// Get simply calls Cache.Get() method. +func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { return g.Cache.Get(g.NS, key, setFunc) } // The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014. +// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, +// Kunlong Zhang, and Michael Spear. +// ACM Symposium on Principles of Distributed Computing, Jul 2014. 
const ( mInitialSize = 1 << 4 @@ -610,10 +614,12 @@ func (n *Node) unrefLocked() { } } +// Handle is a 'cache handle' of a 'cache node'. type Handle struct { n unsafe.Pointer // *Node } +// Value returns the value of the 'cache node'. func (h *Handle) Value() Value { n := (*Node)(atomic.LoadPointer(&h.n)) if n != nil { @@ -622,6 +628,8 @@ func (h *Handle) Value() Value { return nil } +// Release releases this 'cache handle'. +// It is safe to call release multiple times. func (h *Handle) Release() { nPtr := atomic.LoadPointer(&h.n) if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go similarity index 83% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go index d33d5e9..248bf7c 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go @@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string { } func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) + x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey()) if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { + if m, n := internalKey(a).num(), internalKey(b).num(); m > n { x = -1 } else if m < n { x = 1 @@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int { } func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), iKey(b).ukey() + ua, ub := internalKey(a).ukey(), internalKey(b).ukey() dst = icmp.ucmp.Separator(dst, ua, ub) if dst == nil { return nil } if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) + dst = append(dst, keyMaxNumBytes...) } else { // Did not close possibilities that n maybe longer than len(ub). dst = append(dst, a[len(a)-8:]...) @@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte { } func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() + ub := internalKey(b).ukey() dst = icmp.ucmp.Successor(dst, ub) if dst == nil { return nil } if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) + dst = append(dst, keyMaxNumBytes...) } else { // Did not close possibilities that n maybe longer than len(ub). dst = append(dst, b[len(b)-8:]...) 
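
The comparer.go hunks above rename iKey to internalKey without changing the ordering contract, which several later hunks (memGet, SizeOf) depend on: an internal key is the user key followed by eight little-endian bytes packing (seq<<8 | keyType), and entries with equal user keys sort newest-first. A small sketch of that layout and ordering, with illustrative names (makeInternalKey matches the patch; ukey and num are methods on internalKey in the vendored code):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// makeInternalKey appends the 8-byte trailer (seq<<8 | keyType) to the
// user key, reusing dst as scratch space the way the patched code does.
func makeInternalKey(dst, ukey []byte, seq uint64, kt uint8) []byte {
	dst = append(dst, ukey...)
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], seq<<8|uint64(kt))
	return append(dst, buf[:]...)
}

func ukey(ikey []byte) []byte { return ikey[:len(ikey)-8] }

func num(ikey []byte) uint64 {
	return binary.LittleEndian.Uint64(ikey[len(ikey)-8:])
}

// compare mirrors iComparer.Compare: user keys ascending; for equal user
// keys the higher sequence number sorts first, so the newest entry wins.
func compare(a, b []byte) int {
	if x := bytes.Compare(ukey(a), ukey(b)); x != 0 {
		return x
	}
	if m, n := num(a), num(b); m > n {
		return -1
	} else if m < n {
		return 1
	}
	return 0
}

func main() {
	a := makeInternalKey(nil, []byte("k"), 2, 1)
	b := makeInternalKey(nil, []byte("k"), 1, 1)
	fmt.Println(compare(a, b)) // -1: the newer entry for the same key sorts first
}
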
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db.go similarity index 78% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db.go index 88a3e0d..eb6abd0 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db.go @@ -36,14 +36,14 @@ type DB struct { s *session // MemDB. - memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 + memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFd storage.FileDesc + frozenJournalFd storage.FileDesc + frozenSeq uint64 // Snapshot. snapsMu sync.Mutex @@ -61,8 +61,10 @@ type DB struct { writeDelayN int journalC chan *Batch journalAckC chan error + tr *Transaction // Compaction. + compCommitLk sync.Mutex tcompCmdC chan cCmd tcompPauseC chan chan<- struct{} mcompCmdC chan cCmd @@ -70,7 +72,8 @@ type DB struct { compPerErrC chan error compErrSetC chan error compWriteLocking bool - compStats []cStats + compStats cStats + memdbMaxLevel int // For testing. // Close. closeW sync.WaitGroup @@ -104,7 +107,6 @@ func openDB(s *session) (*DB, error) { compErrC: make(chan error), compPerErrC: make(chan error), compErrSetC: make(chan error), - compStats: make([]cStats, s.o.GetNumLevel()), // Close closeC: make(chan struct{}), } @@ -209,7 +211,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { // The returned DB instance is goroutine-safe. // The DB must be closed after use, by calling Close method. func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) + stor, err := storage.OpenFile(path, o.GetReadOnly()) if err != nil { return } @@ -259,7 +261,7 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { // The returned DB instance is goroutine-safe. // The DB must be closed after use, by calling Close method. func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) + stor, err := storage.OpenFile(path, false) if err != nil { return } @@ -278,12 +280,11 @@ func recoverTable(s *session, o *opt.Options) error { o.Strict &= ^opt.StrictReader // Get all tables and sort it by file number. 
- tableFiles_, err := s.getFiles(storage.TypeTable) + fds, err := s.stor.List(storage.TypeTable) if err != nil { return err } - tableFiles := files(tableFiles_) - tableFiles.sort() + sortFds(fds) var ( maxSeq uint64 @@ -296,17 +297,17 @@ func recoverTable(s *session, o *opt.Options) error { rec = &sessionRecord{} bpool = util.NewBufferPool(o.GetBlockSize() + 5) ) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() + buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { + tmpFd = s.newTemp() + writer, err := s.stor.Create(tmpFd) if err != nil { return } defer func() { writer.Close() if err != nil { - tmp.Remove() - tmp = nil + s.stor.Remove(tmpFd) + tmpFd = storage.FileDesc{} } }() @@ -314,7 +315,7 @@ func recoverTable(s *session, o *opt.Options) error { tw := table.NewWriter(writer, o) for iter.Next() { key := iter.Key() - if validIkey(key) { + if validInternalKey(key) { err = tw.Append(key, iter.Value()) if err != nil { return @@ -338,9 +339,9 @@ func recoverTable(s *session, o *opt.Options) error { size = int64(tw.BytesLen()) return } - recoverTable := func(file storage.File) error { - s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() + recoverTable := func(fd storage.FileDesc) error { + s.logf("table@recovery recovering @%d", fd.Num) + reader, err := s.stor.Open(fd) if err != nil { return err } @@ -362,7 +363,7 @@ func recoverTable(s *session, o *opt.Options) error { tgoodKey, tcorruptedKey, tcorruptedBlock int imin, imax []byte ) - tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o) + tr, err := table.NewReader(reader, size, fd, nil, bpool, o) if err != nil { return err } @@ -370,7 +371,7 @@ func recoverTable(s *session, o *opt.Options) error { if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { itererr.SetErrorCallback(func(err error) { if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", file.Num(), err) + s.logf("table@recovery block corruption @%d %q", fd.Num, err) tcorruptedBlock++ } }) @@ -379,7 +380,7 @@ func recoverTable(s *session, o *opt.Options) error { // Scan the table. for iter.Next() { key := iter.Key() - _, seq, _, kerr := parseIkey(key) + _, seq, _, kerr := parseInternalKey(key) if kerr != nil { tcorruptedKey++ continue @@ -405,23 +406,23 @@ func recoverTable(s *session, o *opt.Options) error { if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) return nil } if tgoodKey > 0 { if tcorruptedKey > 0 || tcorruptedBlock > 0 { // Rebuild the table. - s.logf("table@recovery rebuilding @%d", file.Num()) + s.logf("table@recovery rebuilding @%d", fd.Num) iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) + tmpFd, newSize, err := buildTable(iter) iter.Release() if err != nil { return err } closed = true reader.Close() - if err := file.Replace(tmp); err != nil { + if err := s.stor.Rename(tmpFd, fd); err != nil { return err } size = newSize @@ -431,30 +432,30 @@ func recoverTable(s *session, o *opt.Options) error { } recoveredKey += tgoodKey // Add table to level 0. 
- rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + rec.addTable(0, fd.Num, size, imin, imax) + s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) } else { droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size) + s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) } return nil } // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) + if len(fds) > 0 { + s.logf("table@recovery F·%d", len(fds)) // Mark file number as used. - s.markFileNum(tableFiles[len(tableFiles)-1].Num()) + s.markFileNum(fds[len(fds)-1].Num) - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { + for _, fd := range fds { + if err := recoverTable(fd); err != nil { return err } } - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq) + s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) } // Set sequence number. @@ -471,31 +472,31 @@ func recoverTable(s *session, o *opt.Options) error { func (db *DB) recoverJournal() error { // Get all journals and sort it by file number. - allJournalFiles, err := db.s.getFiles(storage.TypeJournal) + rawFds, err := db.s.stor.List(storage.TypeJournal) if err != nil { return err } - files(allJournalFiles).sort() + sortFds(rawFds) // Journals that will be recovered. - var recJournalFiles []storage.File - for _, jf := range allJournalFiles { - if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum { - recJournalFiles = append(recJournalFiles, jf) + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) } } var ( - of storage.File // Obsolete file. + ofd storage.FileDesc // Obsolete file. rec = &sessionRecord{} ) // Recover journals. - if len(recJournalFiles) > 0 { - db.logf("journal@recovery F·%d", len(recJournalFiles)) + if len(fds) > 0 { + db.logf("journal@recovery F·%d", len(fds)) // Mark file number as used. - db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num()) + db.s.markFileNum(fds[len(fds)-1].Num) var ( // Options. @@ -509,31 +510,31 @@ func (db *DB) recoverJournal() error { batch = &Batch{} ) - for _, jf := range recJournalFiles { - db.logf("journal@recovery recovering @%d", jf.Num()) + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) - fr, err := jf.Open() + fr, err := db.s.stor.Open(fd) if err != nil { return err } // Create or reset journal reader instance. if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum) + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) } else { - jr.Reset(fr, dropper{db.s, jf}, strict, checksum) + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) } // Flush memdb and remove obsolete journal file. 
- if of != nil { + if !ofd.Nil() { if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { fr.Close() return err } } - rec.setJournalNum(jf.Num()) + rec.setJournalNum(fd.Num) rec.setSeqNum(db.seq) if err := db.s.commit(rec); err != nil { fr.Close() @@ -541,8 +542,8 @@ func (db *DB) recoverJournal() error { } rec.resetAddedTables() - of.Remove() - of = nil + db.s.stor.Remove(ofd) + ofd = storage.FileDesc{} } // Replay journal to memdb. @@ -555,7 +556,7 @@ func (db *DB) recoverJournal() error { } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } buf.Reset() @@ -566,7 +567,7 @@ func (db *DB) recoverJournal() error { } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil { if !strict && errors.IsCorrupted(err) { @@ -576,7 +577,7 @@ func (db *DB) recoverJournal() error { } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } // Save sequence number. @@ -594,7 +595,7 @@ func (db *DB) recoverJournal() error { } fr.Close() - of = jf + ofd = fd } // Flush the last memdb. @@ -611,7 +612,7 @@ func (db *DB) recoverJournal() error { } // Commit. - rec.setJournalNum(db.journalFile.Num()) + rec.setJournalNum(db.journalFd.Num) rec.setSeqNum(db.seq) if err := db.s.commit(rec); err != nil { // Close journal on error. @@ -623,8 +624,8 @@ func (db *DB) recoverJournal() error { } // Remove the last obsolete journal file. - if of != nil { - of.Remove() + if !ofd.Nil() { + db.s.stor.Remove(ofd) } return nil @@ -632,17 +633,17 @@ func (db *DB) recoverJournal() error { func (db *DB) recoverJournalRO() error { // Get all journals and sort it by file number. - allJournalFiles, err := db.s.getFiles(storage.TypeJournal) + rawFds, err := db.s.stor.List(storage.TypeJournal) if err != nil { return err } - files(allJournalFiles).sort() + sortFds(rawFds) // Journals that will be recovered. - var recJournalFiles []storage.File - for _, jf := range allJournalFiles { - if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum { - recJournalFiles = append(recJournalFiles, jf) + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) } } @@ -656,8 +657,8 @@ func (db *DB) recoverJournalRO() error { ) // Recover journals. - if len(recJournalFiles) > 0 { - db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles)) + if len(fds) > 0 { + db.logf("journal@recovery RO·Mode F·%d", len(fds)) var ( jr *journal.Reader @@ -665,19 +666,19 @@ func (db *DB) recoverJournalRO() error { batch = &Batch{} ) - for _, jf := range recJournalFiles { - db.logf("journal@recovery recovering @%d", jf.Num()) + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) - fr, err := jf.Open() + fr, err := db.s.stor.Open(fd) if err != nil { return err } // Create or reset journal reader instance. if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum) + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) } else { - jr.Reset(fr, dropper{db.s, jf}, strict, checksum) + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) } // Replay journal to memdb. 
@@ -689,7 +690,7 @@ func (db *DB) recoverJournalRO() error { } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } buf.Reset() @@ -700,7 +701,7 @@ func (db *DB) recoverJournalRO() error { } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil { if !strict && errors.IsCorrupted(err) { @@ -710,7 +711,7 @@ func (db *DB) recoverJournalRO() error { } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } // Save sequence number. @@ -727,46 +728,35 @@ func (db *DB) recoverJournalRO() error { return nil } -func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue +func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { + mk, mv, err := mdb.Find(ikey) + if err == nil { + ukey, _, kt, kerr := parseInternalKey(mk) + if kerr != nil { + // Shouldn't have had happen. + panic(kerr) } - defer m.decref() + if icmp.uCompare(ukey, ikey.ukey()) == 0 { + if kt == keyTypeDel { + return true, nil, ErrNotFound + } + return true, mv, nil - mk, mv, me := m.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return nil, ErrNotFound - } - return append([]byte{}, mv...), nil - } - } else if me != ErrNotFound { - return nil, me } - } - - v := db.s.version() - value, cSched, err := v.get(ikey, ro, false) - v.release() - if cSched { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) + } else if err != ErrNotFound { + return true, nil, err } return } -func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := newIkey(key, seq, ktSeek) +func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } em, fm := db.getMems() for _, m := range [...]*memDB{em, fm} { @@ -775,30 +765,55 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err er } defer m.decref() - mk, _, me := m.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return false, nil - } - return true, nil - } - } else if me != ErrNotFound { - return false, me + if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me } } v := db.s.version() - _, cSched, err := v.get(ikey, ro, true) + value, cSched, err := v.get(auxt, ikey, ro, false) v.release() if cSched { // Trigger table compaction. 
- db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) + } + return +} + +func nilIfNotFound(err error) error { + if err == ErrNotFound { + return nil + } + return err +} + +func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok { + return me == nil, nilIfNotFound(me) + } + } + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok { + return me == nil, nilIfNotFound(me) + } + } + + v := db.s.version() + _, cSched, err := v.get(auxt, ikey, ro, true) + v.release() + if cSched { + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) } if err == nil { ret = true @@ -822,7 +837,7 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { se := db.acquireSnapshot() defer db.releaseSnapshot(se) - return db.get(key, se.seq, ro) + return db.get(nil, nil, key, se.seq, ro) } // Has returns true if the DB does contains the given key. @@ -836,11 +851,11 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { se := db.acquireSnapshot() defer db.releaseSnapshot(se) - return db.has(key, se.seq, ro) + return db.has(nil, nil, key, se.seq, ro) } // NewIterator returns an iterator for the latest snapshot of the -// uderlying DB. +// underlying DB. // The returned iterator is not goroutine-safe, but it is safe to use // multiple iterators concurrently, with each in a dedicated goroutine. // It is also safe to use an iterator concurrently with modifying its @@ -864,7 +879,7 @@ func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Itera defer db.releaseSnapshot(se) // Iterator holds 'version' lock, 'version' is immutable so snapshot // can be released after iterator created. - return db.newIterator(se.seq, slice, ro) + return db.newIterator(nil, nil, se.seq, slice, ro) } // GetSnapshot returns a latest snapshot of the underlying DB. A snapshot @@ -920,7 +935,7 @@ func (db *DB) GetProperty(name string) (value string, err error) { var level uint var rest string n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 || int(level) >= db.s.o.GetNumLevel() { + if n != 1 { err = ErrNotFound } else { value = fmt.Sprint(v.tLen(int(level))) @@ -929,8 +944,8 @@ func (db *DB) GetProperty(name string) (value string, err error) { value = "Compactions\n" + " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) if len(tables) == 0 && duration == 0 { continue } @@ -939,10 +954,10 @@ func (db *DB) GetProperty(name string) (value string, err error) { float64(read)/1048576.0, float64(write)/1048576.0) } case p == "sstables": - for level, tables := range v.tables { + for level, tables := range v.levels { value += fmt.Sprintf("--- level %d ---\n", level) for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) + value += fmt.Sprintf("%d:%d[%q .. 
%q]\n", t.fd.Num, t.size, t.imin, t.imax) } } case p == "blockpool": @@ -982,8 +997,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { sizes := make(Sizes, 0, len(ranges)) for _, r := range ranges { - imin := newIkey(r.Start, kMaxSeq, ktSeek) - imax := newIkey(r.Limit, kMaxSeq, ktSeek) + imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) + imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) start, err := v.offsetOf(imin) if err != nil { return nil, err @@ -992,7 +1007,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { if err != nil { return nil, err } - var size uint64 + var size int64 if limit >= start { size = limit - start } @@ -1002,8 +1017,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { return sizes, nil } -// Close closes the DB. This will also releases any outstanding snapshot and -// abort any in-flight compaction. +// Close closes the DB. This will also releases any outstanding snapshot, +// abort any in-flight compaction and discard open transaction. // // It is not safe to close a DB until all outstanding iterators are released. // It is valid to call Close multiple times. Other methods should not be @@ -1032,11 +1047,18 @@ func (db *DB) Close() error { // Signal all goroutines. close(db.closeC) + // Discard open transaction. + if db.tr != nil { + db.tr.Discard() + } + + // Acquire writer lock. + db.writeLockC <- struct{}{} + // Wait for all gorotines to exit. db.closeW.Wait() - // Lock writer and closes journal. - db.writeLockC <- struct{}{} + // Closes journal. if db.journal != nil { db.journal.Close() db.journalWriter.Close() @@ -1063,8 +1085,6 @@ func (db *DB) Close() error { db.frozenMem = nil db.journal = nil db.journalWriter = nil - db.journalFile = nil - db.frozenJournalFile = nil db.closer = nil return err diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go similarity index 78% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go index 2600310..5553202 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -12,55 +12,76 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" ) var ( errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") ) -type cStats struct { - sync.Mutex +type cStat struct { duration time.Duration - read uint64 - write uint64 + read int64 + write int64 } -func (p *cStats) add(n *cStatsStaging) { - p.Lock() +func (p *cStat) add(n *cStatStaging) { p.duration += n.duration p.read += n.read p.write += n.write - p.Unlock() } -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() +func (p *cStat) get() (duration time.Duration, read, write int64) { return p.duration, p.read, p.write } -type cStatsStaging struct { +type cStatStaging struct { start time.Time duration time.Duration on bool - read uint64 - write uint64 + read int64 + write int64 } -func (p *cStatsStaging) startTimer() { +func (p *cStatStaging) startTimer() { if !p.on { p.start = time.Now() p.on = true } } -func (p *cStatsStaging) stopTimer() { +func (p *cStatStaging) stopTimer() { if p.on { p.duration += time.Since(p.start) p.on = false } } +type cStats struct { 
+ lk sync.Mutex + stats []cStat +} + +func (p *cStats) addStat(level int, n *cStatStaging) { + p.lk.Lock() + if level >= len(p.stats) { + newStats := make([]cStat, level+1) + copy(newStats, p.stats) + p.stats = newStats + } + p.stats[level].add(n) + p.lk.Unlock() +} + +func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { + p.lk.Lock() + defer p.lk.Unlock() + if level < len(p.stats) { + return p.stats[level].get() + } + return +} + func (db *DB) compactionError() { var err error noerr: @@ -151,7 +172,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) { disableBackoff = db.s.o.GetDisableCompactionBackoff() ) for n := 0; ; n++ { - // Check wether the DB is closed. + // Check whether the DB is closed. if db.isClosed() { db.logf("%s exiting", name) db.compactionExitTransact() @@ -235,6 +256,14 @@ func (db *DB) compactionExitTransact() { panic(errCompactionTransactExiting) } +func (db *DB) compactionCommit(name string, rec *sessionRecord) { + db.compCommitLk.Lock() + defer db.compCommitLk.Unlock() // Defer is necessary. + db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { + return db.s.commit(rec) + }, nil) +} + func (db *DB) memCompaction() { mdb := db.getFrozenMem() if mdb == nil { @@ -265,41 +294,40 @@ func (db *DB) memCompaction() { var ( rec = &sessionRecord{} - stats = &cStatsStaging{} + stats = &cStatStaging{} flushLevel int ) + // Generate tables. db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { stats.startTimer() - flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1) + flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) stats.stopTimer() return }, func() error { for _, r := range rec.addedTables { db.logf("memdb@flush revert @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { + if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { return err } } return nil }) - db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - rec.setJournalNum(db.journalFile.Num()) - rec.setSeqNum(db.frozenSeq) - err = db.s.commit(rec) - stats.stopTimer() - return - }, nil) + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.frozenSeq) + + // Commit. + stats.startTimer() + db.compactionCommit("memdb", rec) + stats.stopTimer() db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) for _, r := range rec.addedTables { stats.write += r.size } - db.compStats[flushLevel].add(stats) + db.compStats.addStat(flushLevel, stats) // Drop frozen memdb. db.dropFrozenMem() @@ -315,7 +343,7 @@ func (db *DB) memCompaction() { } // Trigger table compaction. 
- db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) } type tableCompactionBuilder struct { @@ -323,7 +351,7 @@ type tableCompactionBuilder struct { s *session c *compaction rec *sessionRecord - stat0, stat1 *cStatsStaging + stat0, stat1 *cStatStaging snapHasLastUkey bool snapLastUkey []byte @@ -377,9 +405,9 @@ func (b *tableCompactionBuilder) flush() error { if err != nil { return err } - b.rec.addTableFile(b.c.level+1, t) + b.rec.addTableFile(b.c.sourceLevel+1, t) b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) b.tw = nil return nil } @@ -424,7 +452,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { } ikey := iter.Key() - ukey, seq, kt, kerr := parseIkey(ikey) + ukey, seq, kt, kerr := parseInternalKey(ikey) if kerr == nil { shouldStop := !resumed && b.c.shouldStopBefore(ikey) @@ -450,14 +478,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { hasLastUkey = true lastUkey = append(lastUkey[:0], ukey...) - lastSeq = kMaxSeq + lastSeq = keyMaxSeq } switch { case lastSeq <= b.minSeq: // Dropped because newer entry for same user key exist fallthrough // (A) - case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): + case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): // For this user key: // (1) there is no data in higher levels // (2) data in lower levels will have larger seq numbers @@ -479,7 +507,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { // Don't drop corrupted keys. 
hasLastUkey = false lastUkey = lastUkey[:0] - lastSeq = kMaxSeq + lastSeq = keyMaxSeq b.kerrCnt++ } @@ -502,8 +530,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { func (b *tableCompactionBuilder) revert() error { for _, at := range b.rec.addedTables { b.s.logf("table@build revert @%d", at.num) - f := b.s.getTableFile(at.num) - if err := f.Remove(); err != nil { + if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { return err } } @@ -514,30 +541,28 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) { defer c.release() rec := &sessionRecord{} - rec.addCompPtr(c.level, c.imax) + rec.addCompPtr(c.sourceLevel, c.imax) if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.delTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) + t := c.levels[0][0] + db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) + rec.delTable(c.sourceLevel, t.fd.Num) + rec.addTableFile(c.sourceLevel+1, t) + db.compactionCommit("table-move", rec) return } - var stats [2]cStatsStaging - for i, tables := range c.tables { + var stats [2]cStatStaging + for i, tables := range c.levels { for _, t := range tables { stats[i].read += t.size // Insert deleted tables into record - rec.delTable(c.level+i, t.file.Num()) + rec.delTable(c.sourceLevel+i, t.fd.Num) } } sourceSize := int(stats[0].read + stats[1].read) minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) b := &tableCompactionBuilder{ db: db, @@ -547,49 +572,60 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) { stat1: &stats[1], minSeq: minSeq, strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.level + 1), + tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), } db.compactionTransact("table@build", b) - // Commit changes - db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) + // Commit. 
+ stats[1].startTimer() + db.compactionCommit("table", rec) + stats[1].stopTimer() resultSize := int(stats[1].write) db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) // Save compaction stats for i := range stats { - db.compStats[c.level+1].add(&stats[i]) + db.compStats.addStat(c.sourceLevel+1, &stats[i]) } } -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { db.logf("table@compaction range L%d %q:%q", level, umin, umax) - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { + if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { db.tableCompaction(c, true) } } else { - v := db.s.version() - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 - } - } - v.release() + // Retry until nothing to compact. + for { + compacted := false - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) + // Scan for maximum level with overlapped tables. + v := db.s.version() + m := 1 + for i := m; i < len(v.levels); i++ { + tables := v.levels[i] + if tables.overlaps(db.s.icmp, umin, umax, false) { + m = i + } + } + v.release() + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { + db.tableCompaction(c, true) + compacted = true + } + } + + if !compacted { + break } } } + + return nil } func (db *DB) tableAutoCompaction() { @@ -616,11 +652,11 @@ type cCmd interface { ack(err error) } -type cIdle struct { +type cAuto struct { ackC chan<- error } -func (r cIdle) ack(err error) { +func (r cAuto) ack(err error) { if r.ackC != nil { defer func() { recover() @@ -644,13 +680,21 @@ func (r cRange) ack(err error) { } } +// This will trigger auto compaction but will not wait for it. +func (db *DB) compTrigger(compC chan<- cCmd) { + select { + case compC <- cAuto{}: + default: + } +} + // This will trigger auto compation and/or wait for all compaction to be done. -func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { +func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { ch := make(chan error) defer close(ch) // Send cmd. select { - case compC <- cIdle{ch}: + case compC <- cAuto{ch}: case err = <-db.compErrC: return case _, _ = <-db.closeC: @@ -666,16 +710,8 @@ func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { return err } -// This will trigger auto compaction but will not wait for it. -func (db *DB) compSendTrigger(compC chan<- cCmd) { - select { - case compC <- cIdle{}: - default: - } -} - // Send range compaction request. -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { +func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { ch := make(chan error) defer close(ch) // Send cmd. 
@@ -715,7 +751,7 @@ func (db *DB) mCompaction() { select { case x = <-db.mcompCmdC: switch x.(type) { - case cIdle: + case cAuto: db.memCompaction() x.ack(nil) x = nil @@ -776,11 +812,10 @@ func (db *DB) tCompaction() { } if x != nil { switch cmd := x.(type) { - case cIdle: + case cAuto: ackQ = append(ackQ, x) case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) + x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) default: panic("leveldb: unknown command") } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go similarity index 81% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go index 656ae98..03c24cd 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -19,7 +19,7 @@ import ( ) var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") + errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") ) type memdbReleaser struct { @@ -33,40 +33,50 @@ func (mr *memdbReleaser) Release() { }) } -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { +func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) em, fm := db.getMems() v := db.s.version() - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) + tableIts := v.getIterators(slice, ro) + n := len(tableIts) + len(auxt) + 3 + its := make([]iterator.Iterator, 0, n) + + if auxm != nil { + ami := auxm.NewIterator(slice) + ami.SetReleaser(&memdbReleaser{m: auxm}) + its = append(its, ami) + } + for _, t := range auxt { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + emi := em.NewIterator(slice) emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) + its = append(its, emi) if fm != nil { fmi := fm.NewIterator(slice) fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) + its = append(its, fmi) } - i = append(i, ti...) - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) + its = append(its, tableIts...) 
+ mi := iterator.NewMergedIterator(its, db.s.icmp, strict) mi.SetReleaser(&versionReleaser{v: v}) return mi } -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { +func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { var islice *util.Range if slice != nil { islice = &util.Range{} if slice.Start != nil { - islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek) + islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek) } if slice.Limit != nil { - islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek) + islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek) } } - rawIter := db.newRawIterator(islice, ro) + rawIter := db.newRawIterator(auxm, auxt, islice, ro) iter := &dbIter{ db: db, icmp: db.s.icmp, @@ -177,7 +187,7 @@ func (i *dbIter) Seek(key []byte) bool { return false } - ikey := newIkey(key, i.seq, ktSeek) + ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) if i.iter.Seek(ikey) { i.dir = dirSOI return i.next() @@ -189,15 +199,15 @@ func (i *dbIter) Seek(key []byte) bool { func (i *dbIter) next() bool { for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { i.sampleSeek() if seq <= i.seq { switch kt { - case ktDel: + case keyTypeDel: // Skip deleted key. i.key = append(i.key[:0], ukey...) i.dir = dirForward - case ktVal: + case keyTypeVal: if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { i.key = append(i.key[:0], ukey...) i.value = append(i.value[:0], i.iter.Value()...) @@ -240,13 +250,13 @@ func (i *dbIter) prev() bool { del := true if i.iter.Valid() { for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { i.sampleSeek() if seq <= i.seq { if !del && i.icmp.uCompare(ukey, i.key) < 0 { return true } - del = (kt == ktDel) + del = (kt == keyTypeDel) if !del { i.key = append(i.key[:0], ukey...) i.value = append(i.value[:0], i.iter.Value()...) @@ -282,7 +292,7 @@ func (i *dbIter) Prev() bool { return i.Last() case dirForward: for i.iter.Prev() { - if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil { + if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { i.sampleSeek() if i.icmp.uCompare(ukey, i.key) < 0 { goto cont diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go similarity index 94% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go index 0372848..977f65b 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -110,7 +110,7 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er err = ErrSnapshotReleased return } - return snap.db.get(key, snap.elem.seq, ro) + return snap.db.get(nil, nil, key, snap.elem.seq, ro) } // Has returns true if the DB does contains the given key. @@ -127,10 +127,10 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) err = ErrSnapshotReleased return } - return snap.db.has(key, snap.elem.seq, ro) + return snap.db.has(nil, nil, key, snap.elem.seq, ro) } -// NewIterator returns an iterator for the snapshot of the uderlying DB. 
+// NewIterator returns an iterator for the snapshot of the underlying DB. // The returned iterator is not goroutine-safe, but it is safe to use // multiple iterators concurrently, with each in a dedicated goroutine. // It is also safe to use an iterator concurrently with modifying its @@ -158,7 +158,7 @@ func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterat } // Since iterator already hold version ref, it doesn't need to // hold snapshot ref. - return snap.db.newIterator(snap.elem.seq, slice, ro) + return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro) } // Release releases the snapshot. This will not release any returned diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go similarity index 79% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go index 24671dd..40f454d 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go @@ -12,6 +12,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/journal" "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" ) type memDB struct { @@ -20,6 +21,10 @@ type memDB struct { ref int32 } +func (m *memDB) getref() int32 { + return atomic.LoadInt32(&m.ref) +} + func (m *memDB) incref() { atomic.AddInt32(&m.ref, 1) } @@ -48,11 +53,15 @@ func (db *DB) addSeq(delta uint64) { atomic.AddUint64(&db.seq, delta) } -func (db *DB) sampleSeek(ikey iKey) { +func (db *DB) setSeq(seq uint64) { + atomic.StoreUint64(&db.seq, seq) +} + +func (db *DB) sampleSeek(ikey internalKey) { v := db.s.version() if v.sampleSeek(ikey) { // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) } v.release() } @@ -67,12 +76,18 @@ func (db *DB) mpoolPut(mem *memdb.DB) { } } -func (db *DB) mpoolGet() *memdb.DB { +func (db *DB) mpoolGet(n int) *memDB { + var mdb *memdb.DB select { - case mem := <-db.memPool: - return mem + case mdb = <-db.memPool: default: - return nil + } + if mdb == nil || mdb.Capacity() < n { + mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) + } + return &memDB{ + db: db, + DB: mdb, } } @@ -95,11 +110,10 @@ func (db *DB) mpoolDrain() { // Create new memdb and froze the old one; need external synchronization. // newMem only called synchronously by the writer. func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() + fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} + w, err := db.s.stor.Create(fd) if err != nil { - db.s.reuseFileNum(num) + db.s.reuseFileNum(fd.Num) return } @@ -115,20 +129,14 @@ func (db *DB) newMem(n int) (mem *memDB, err error) { } else { db.journal.Reset(w) db.journalWriter.Close() - db.frozenJournalFile = db.journalFile + db.frozenJournalFd = db.journalFd } db.journalWriter = w - db.journalFile = file + db.journalFd = fd db.frozenMem = db.mem - mdb := db.mpoolGet() - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - mem = &memDB{ - db: db, - DB: mdb, - ref: 2, - } + mem = db.mpoolGet(n) + mem.incref() // for self + mem.incref() // for caller db.mem = mem // The seq only incremented by the writer. 
And whoever called newMem
 	// should hold write lock, so no need additional synchronization here.
@@ -181,12 +189,12 @@ func (db *DB) getFrozenMem() *memDB {
 // Drop frozen memdb; assume that frozen memdb isn't nil.
 func (db *DB) dropFrozenMem() {
 	db.memMu.Lock()
-	if err := db.frozenJournalFile.Remove(); err != nil {
-		db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
+	if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
+		db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
 	} else {
-		db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
+		db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
 	}
-	db.frozenJournalFile = nil
+	db.frozenJournalFd = storage.FileDesc{}
 	db.frozenMem.decref()
 	db.frozenMem = nil
 	db.memMu.Unlock()
diff --git a/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
new file mode 100644
index 0000000..fca8803
--- /dev/null
+++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
@@ -0,0 +1,289 @@
+// Copyright (c) 2016, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var errTransactionDone = errors.New("leveldb: transaction already closed")
+
+// Transaction is the transaction handle.
+type Transaction struct {
+	db        *DB
+	lk        sync.RWMutex
+	seq       uint64
+	mem       *memDB
+	tables    tFiles
+	ikScratch []byte
+	rec       sessionRecord
+	stats     cStatStaging
+	closed    bool
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return nil, errTransactionDone
+	}
+	return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// Has returns true if the DB does contain the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return false, errTransactionDone
+	}
+	return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the transaction.
+// The returned iterator is not goroutine-safe, but it is safe to use multiple
+// iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with writes to the
+// transaction. The resultant key/value pairs are guaranteed to be consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return iterator.NewEmptyIterator(errTransactionDone)
+	}
+	tr.mem.incref()
+	return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
+}
+
+func (tr *Transaction) flush() error {
+	// Flush memdb.
+	if tr.mem.Len() != 0 {
+		tr.stats.startTimer()
+		iter := tr.mem.NewIterator(nil)
+		t, n, err := tr.db.s.tops.createFrom(iter)
+		iter.Release()
+		tr.stats.stopTimer()
+		if err != nil {
+			return err
+		}
+		if tr.mem.getref() == 1 {
+			tr.mem.Reset()
+		} else {
+			tr.mem.decref()
+			tr.mem = tr.db.mpoolGet(0)
+			tr.mem.incref()
+		}
+		tr.tables = append(tr.tables, t)
+		tr.rec.addTableFile(0, t)
+		tr.stats.write += t.size
+		tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+	}
+	return nil
+}
+
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
+	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
+		if err := tr.flush(); err != nil {
+			return err
+		}
+	}
+	if err := tr.mem.Put(tr.ikScratch, value); err != nil {
+		return err
+	}
+	tr.seq++
+	return nil
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+// Please note that the transaction is not compacted until committed, so if
+// you write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeVal, key, value)
+}
+
+// Delete deletes the value for the given key.
+// Please note that the transaction is not compacted until committed, so if
+// you write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeDel, key, nil)
+}
+
+// Write applies the given batch to the transaction. The batch will be applied
+// sequentially.
+// Please note that the transaction is not compacted until committed, so if
+// you write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Write returns.
+func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
+	if b == nil || b.Len() == 0 {
+		return nil
+	}
+
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		return tr.put(kt, key, value)
+	})
+}
+
+func (tr *Transaction) setDone() {
+	tr.closed = true
+	tr.db.tr = nil
+	tr.mem.decref()
+	<-tr.db.writeLockC
+}
+
+// Commit commits the transaction.
+//
+// Other methods should not be called after the transaction has been committed.
+func (tr *Transaction) Commit() error {
+	if err := tr.db.ok(); err != nil {
+		return err
+	}
+
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	defer tr.setDone()
+	if err := tr.flush(); err != nil {
+		tr.discard()
+		return err
+	}
+	if len(tr.tables) != 0 {
+		// Committing transaction.
+		tr.rec.setSeqNum(tr.seq)
+		tr.db.compCommitLk.Lock()
+		defer tr.db.compCommitLk.Unlock()
+		for retry := 0; retry < 3; retry++ {
+			if err := tr.db.s.commit(&tr.rec); err != nil {
+				tr.db.logf("transaction@commit error R·%d %q", retry, err)
+				select {
+				case <-time.After(time.Second):
+				case _, _ = <-tr.db.closeC:
+					tr.db.logf("transaction@commit exiting")
+					return err
+				}
+			} else {
+				// Success. Set db.seq.
+				tr.db.setSeq(tr.seq)
+				break
+			}
+		}
+		// Trigger table auto-compaction.
+		tr.db.compTrigger(tr.db.tcompCmdC)
+	}
+	return nil
+}
+
+func (tr *Transaction) discard() {
+	// Discard transaction.
+	for _, t := range tr.tables {
+		tr.db.logf("transaction@discard @%d", t.fd.Num)
+		if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
+			tr.db.s.reuseFileNum(t.fd.Num)
+		}
+	}
+}
+
+// Discard discards the transaction.
+//
+// Other methods should not be called after the transaction has been discarded.
+func (tr *Transaction) Discard() {
+	tr.lk.Lock()
+	if !tr.closed {
+		tr.discard()
+		tr.setDone()
+	}
+	tr.lk.Unlock()
+}
+
+// OpenTransaction opens an atomic DB transaction. Only one transaction can be
+// opened at a time. Write will be blocked until the transaction is committed or
+// discarded.
+// The returned transaction handle is goroutine-safe.
+//
+// The transaction must be closed once done, either by committing or discarding
+// the transaction.
+// Closing the DB will discard any open transaction.
+func (db *DB) OpenTransaction() (*Transaction, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	// The write happens synchronously.
+	select {
+	case db.writeLockC <- struct{}{}:
+	case err := <-db.compPerErrC:
+		return nil, err
+	case _, _ = <-db.closeC:
+		return nil, ErrClosed
+	}
+
+	if db.tr != nil {
+		panic("leveldb: has open transaction")
+	}
+
+	// Flush current memdb.
+	if db.mem != nil && db.mem.Len() != 0 {
+		if _, err := db.rotateMem(0, true); err != nil {
+			return nil, err
+		}
+	}
+
+	tr := &Transaction{
+		db:  db,
+		seq: db.seq,
+		mem: db.mpoolGet(0),
+	}
+	tr.mem.incref()
+	db.tr = tr
+	return tr, nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
similarity index 57%
rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
index a8a2bdf..7fd386c 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
+++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -21,14 +21,16 @@ type Reader interface {
 	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
 }
 
-type Sizes []uint64
+// Sizes is a list of sizes.
+type Sizes []int64
 
 // Sum returns sum of the sizes.
-func (p Sizes) Sum() (n uint64) {
-	for _, s := range p {
-		n += s
+func (sizes Sizes) Sum() int64 {
+	var sum int64
+	for _, size := range sizes {
+		sum += size
 	}
-	return n
+	return sum
 }
 
 // Logging.
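For context, the new db_transaction.go above exposes OpenTransaction, Put/Delete/Write, Commit, and Discard. A minimal usage sketch of that API; the helper name and the nil write options are illustrative assumptions, not part of the patch:

	import "github.com/syndtr/goleveldb/leveldb"

	// putAllAtomically applies a set of key/value pairs in one transaction.
	// OpenTransaction blocks other writers until Commit or Discard is called.
	func putAllAtomically(db *leveldb.DB, kvs map[string][]byte) error {
		tr, err := db.OpenTransaction()
		if err != nil {
			return err
		}
		for k, v := range kvs {
			if err := tr.Put([]byte(k), v, nil); err != nil {
				tr.Discard() // removes flushed tables and releases the write lock
				return err
			}
		}
		return tr.Commit() // retries the session commit before giving up
	}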
@@ -40,59 +42,59 @@ func (db *DB) checkAndCleanFiles() error { v := db.s.version() defer v.release() - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { + tmap := make(map[int64]bool) + for _, tables := range v.levels { for _, t := range tables { - tablesMap[t.file.Num()] = false + tmap[t.fd.Num] = false } } - files, err := db.s.getFiles(storage.TypeAll) + fds, err := db.s.stor.List(storage.TypeAll) if err != nil { return err } - var nTables int - var rem []storage.File - for _, f := range files { + var nt int + var rem []storage.FileDesc + for _, fd := range fds { keep := true - switch f.Type() { + switch fd.Type { case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() + keep = fd.Num >= db.s.manifestFd.Num case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() + if !db.frozenJournalFd.Nil() { + keep = fd.Num >= db.frozenJournalFd.Num } else { - keep = f.Num() >= db.journalFile.Num() + keep = fd.Num >= db.journalFd.Num } case storage.TypeTable: - _, keep = tablesMap[f.Num()] + _, keep = tmap[fd.Num] if keep { - tablesMap[f.Num()] = true - nTables++ + tmap[fd.Num] = true + nt++ } } if !keep { - rem = append(rem, f) + rem = append(rem, fd) } } - if nTables != len(tablesMap) { - var missing []*storage.FileInfo - for num, present := range tablesMap { + if nt != len(tmap) { + var mfds []storage.FileDesc + for num, present := range tmap { if !present { - missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num}) + mfds = append(mfds, storage.FileDesc{storage.TypeTable, num}) db.logf("db@janitor table missing @%d", num) } } - return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing}) + return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) } - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { + db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) + for _, fd := range rem { + db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) + if err := db.s.stor.Remove(fd); err != nil { return err } } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go similarity index 85% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go index 0c39565..5576761 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -45,9 +45,9 @@ func (db *DB) jWriter() { } } -func (db *DB) rotateMem(n int) (mem *memDB, err error) { +func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) + err = db.compTriggerWait(db.mcompCmdC) if err != nil { return } @@ -59,7 +59,11 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) { } // Schedule memdb compaction. 
- db.compSendTrigger(db.mcompCmdC) + if wait { + err = db.compTriggerWait(db.mcompCmdC) + } else { + db.compTrigger(db.mcompCmdC) + } return } @@ -84,7 +88,7 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { return false case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger(): delayed = true - err = db.compSendIdle(db.tcompCmdC) + err = db.compTriggerWait(db.tcompCmdC) if err != nil { return false } @@ -94,7 +98,7 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { mdbFree = n } else { mdb.decref() - mdb, err = db.rotateMem(n) + mdb, err = db.rotateMem(n, false) if err == nil { mdbFree = mdb.Free() } else { @@ -131,12 +135,27 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { b.init(wo.GetSync() && !db.s.o.GetNoSync()) + if b.size() > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() { + // Writes using transaction. + tr, err1 := db.OpenTransaction() + if err1 != nil { + return err1 + } + if err1 := tr.Write(b, wo); err1 != nil { + tr.Discard() + return err1 + } + return tr.Commit() + } + // The write happen synchronously. select { case db.writeC <- b: if <-db.writeMergedC { return <-db.writeAckC } + // Continue, the write lock already acquired by previous writer + // and handed out to us. case db.writeLockC <- struct{}{}: case err = <-db.compPerErrC: return @@ -147,14 +166,15 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { merged := 0 danglingMerge := false defer func() { + for i := 0; i < merged; i++ { + db.writeAckC <- err + } if danglingMerge { + // Only one dangling merge at most, so this is safe. db.writeMergedC <- false } else { <-db.writeLockC } - for i := 0; i < merged; i++ { - db.writeAckC <- err - } }() mdb, mdbFree, err := db.flush(b.size()) @@ -234,7 +254,7 @@ drain: db.addSeq(uint64(b.Len())) if b.size() >= mdbFree { - db.rotateMem(0) + db.rotateMem(0, false) } return } @@ -261,8 +281,8 @@ func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { iter := mem.NewIterator(nil) defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) + return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && + (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) } // CompactRange compacts the underlying DB for the given key range. @@ -293,12 +313,12 @@ func (db *DB) CompactRange(r util.Range) error { defer mdb.decref() if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { + if _, err := db.rotateMem(0, false); err != nil { <-db.writeLockC return err } <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { + if err := db.compTriggerWait(db.mcompCmdC); err != nil { return err } } else { @@ -306,7 +326,7 @@ func (db *DB) CompactRange(r util.Range) error { } // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) + return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) } // SetReadOnly makes DB read-only. It will stay read-only until reopened. 
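The Write change above routes batches larger than WriteBuffer through the new transaction path unless DisableLargeBatchTransaction is set (the option is added in the opt changes later in this patch). A sketch of opting out, assuming the usual leveldb.OpenFile entry point, which is not shown in this diff:

	import (
		"github.com/syndtr/goleveldb/leveldb"
		"github.com/syndtr/goleveldb/leveldb/opt"
	)

	// openWithoutLargeBatchTx keeps the pre-patch behavior: large batches
	// stay on the plain write path instead of becoming a transaction.
	func openWithoutLargeBatchTx(path string) (*leveldb.DB, error) {
		o := &opt.Options{
			DisableLargeBatchTransaction: true,
			WriteBuffer:                  4 * opt.MiB, // the package default
		}
		return leveldb.OpenFile(path, o)
	}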
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/doc.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/errors.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go similarity index 79% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go index dacbf13..9a0f6e2 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go @@ -29,21 +29,21 @@ func New(text string) error { // ErrCorrupted is the type that wraps errors that indicate corruption in // the database. type ErrCorrupted struct { - File *storage.FileInfo - Err error + Fd storage.FileDesc + Err error } func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) + if !e.Fd.Nil() { + return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) } else { return e.Err.Error() } } // NewErrCorrupted creates new ErrCorrupted error. -func NewErrCorrupted(f storage.File, err error) error { - return &ErrCorrupted{storage.NewFileInfo(f), err} +func NewErrCorrupted(fd storage.FileDesc, err error) error { + return &ErrCorrupted{fd, err} } // IsCorrupted returns a boolean indicating whether the error is indicating @@ -61,17 +61,17 @@ func IsCorrupted(err error) bool { // ErrMissingFiles is the type that indicating a corruption due to missing // files. ErrMissingFiles always wrapped with ErrCorrupted. type ErrMissingFiles struct { - Files []*storage.FileInfo + Fds []storage.FileDesc } func (e *ErrMissingFiles) Error() string { return "file missing" } -// SetFile sets 'file info' of the given error with the given file. +// SetFd sets 'file info' of the given error with the given file. // Currently only ErrCorrupted is supported, otherwise will do nothing. 
-func SetFile(err error, f storage.File) error { +func SetFd(err error, fd storage.FileDesc) error { switch x := err.(type) { case *ErrCorrupted: - x.File = storage.NewFileInfo(f) + x.Fd = fd return x } return err diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter.go similarity index 84% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter.go index 37c1e14..e961e42 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter.go @@ -15,7 +15,7 @@ type iFilter struct { } func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) + return f.Filter.Contains(filter, internalKey(key).ukey()) } func (f iFilter) NewGenerator() filter.FilterGenerator { @@ -27,5 +27,5 @@ type iFilterGenerator struct { } func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) + g.FilterGenerator.Add(internalKey(key).ukey()) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go similarity index 99% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go rename to 
cmd/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
index 6519ec6..891098b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
+++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -83,6 +83,7 @@ import (
 	"io"
 
 	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/storage"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
@@ -165,7 +166,7 @@ func (r *Reader) corrupt(n int, reason string, skip bool) error {
 		r.dropper.Drop(&ErrCorrupted{n, reason})
 	}
 	if r.strict && !skip {
-		r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
+		r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
 		return r.err
 	}
 	return errSkip
diff --git a/cmd/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000..d0b80aa
--- /dev/null
+++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
+	Ikey   []byte
+	Reason string
+}
+
+func (e *ErrInternalKeyCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type keyType uint
+
+func (kt keyType) String() string {
+	switch kt {
+	case keyTypeDel:
+		return "d"
+	case keyTypeVal:
+		return "v"
+	}
+	return "x"
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+	keyTypeDel keyType = iota
+	keyTypeVal
+)
+
+// keyTypeSeek defines the keyType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const keyTypeSeek = keyTypeVal
+
+const (
+	// Maximum value possible for a sequence number; the low 8 bits are
+	// used by the value type, so the two can be packed together into a
+	// single 64-bit integer.
+	keyMaxSeq = (uint64(1) << 56) - 1
+	// Maximum value possible for packed sequence number and type.
+	keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
+)
+
+// Maximum number encoded in bytes.
+var keyMaxNumBytes = make([]byte, 8) + +func init() { + binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum) +} + +type internalKey []byte + +func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey { + if seq > keyMaxSeq { + panic("leveldb: invalid sequence number") + } else if kt > keyTypeVal { + panic("leveldb: invalid type") + } + + if n := len(ukey) + 8; cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + copy(dst, ukey) + binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt)) + return internalKey(dst) +} + +func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) { + if len(ik) < 8 { + return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length") + } + num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) + seq, kt = uint64(num>>8), keyType(num&0xff) + if kt > keyTypeVal { + return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type") + } + ukey = ik[:len(ik)-8] + return +} + +func validInternalKey(ik []byte) bool { + _, _, _, err := parseInternalKey(ik) + return err == nil +} + +func (ik internalKey) assert() { + if ik == nil { + panic("leveldb: nil internalKey") + } + if len(ik) < 8 { + panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik))) + } +} + +func (ik internalKey) ukey() []byte { + ik.assert() + return ik[:len(ik)-8] +} + +func (ik internalKey) num() uint64 { + ik.assert() + return binary.LittleEndian.Uint64(ik[len(ik)-8:]) +} + +func (ik internalKey) parseNum() (seq uint64, kt keyType) { + num := ik.num() + seq, kt = uint64(num>>8), keyType(num&0xff) + if kt > keyTypeVal { + panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) + } + return +} + +func (ik internalKey) String() string { + if ik == nil { + return "" + } + + if ukey, seq, kt, err := parseInternalKey(ik); err == nil { + return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go similarity index 94% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go index 7b5d8b9..3d2bf1c 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -8,10 +8,11 @@ package opt import ( + "math" + "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/comparer" "github.com/syndtr/goleveldb/leveldb/filter" - "math" ) const ( @@ -35,8 +36,6 @@ var ( DefaultCompactionTotalSizeMultiplier = 10.0 DefaultCompressionType = SnappyCompression DefaultIteratorSamplingRate = 1 * MiB - DefaultMaxMemCompationLevel = 2 - DefaultNumLevel = 7 DefaultOpenFilesCacher = LRUCacher DefaultOpenFilesCacheCapacity = 500 DefaultWriteBuffer = 4 * MiB @@ -266,6 +265,13 @@ type Options struct { // The default value is false. DisableCompactionBackoff bool + // DisableLargeBatchTransaction allows disabling switch-to-transaction mode + // on large batch write. 
When this option is left disabled (the default), batch writes
+	// larger than WriteBuffer are applied through a transaction.
+	//
+	// The default is false.
+	DisableLargeBatchTransaction bool
+
 	// ErrorIfExist defines whether an error should returned if the DB already
 	// exist.
 	//
@@ -301,24 +307,11 @@ type Options struct {
 	// The default is 1MiB.
 	IteratorSamplingRate int
 
-	// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
-	// will be pushed into if doesn't creates overlap. This should less than
-	// NumLevel. Use -1 for level-0.
-	//
-	// The default is 2.
-	MaxMemCompationLevel int
-
 	// NoSync allows completely disable fsync.
 	//
 	// The default is false.
 	NoSync bool
 
-	// NumLevel defines number of database level. The level shouldn't changed
-	// between opens, or the database will panic.
-	//
-	// The default is 7.
-	NumLevel int
-
 	// OpenFilesCacher provides cache algorithm for open files caching.
 	// Specify NoCacher to disable caching algorithm.
 	//
@@ -440,7 +433,7 @@ func (o *Options) GetCompactionTableSize(level int) int {
 	if o.CompactionTableSize > 0 {
 		base = o.CompactionTableSize
 	}
-	if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+	if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
 		mult = o.CompactionTableSizeMultiplierPerLevel[level]
 	} else if o.CompactionTableSizeMultiplier > 0 {
 		mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
@@ -461,7 +454,7 @@ func (o *Options) GetCompactionTotalSize(level int) int64 {
 	if o.CompactionTotalSize > 0 {
 		base = o.CompactionTotalSize
 	}
-	if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+	if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
 		mult = o.CompactionTotalSizeMultiplierPerLevel[level]
 	} else if o.CompactionTotalSizeMultiplier > 0 {
 		mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
@@ -508,6 +501,13 @@ func (o *Options) GetDisableCompactionBackoff() bool {
 	return o.DisableCompactionBackoff
 }
 
+func (o *Options) GetDisableLargeBatchTransaction() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableLargeBatchTransaction
+}
+
 func (o *Options) GetErrorIfExist() bool {
 	if o == nil {
 		return false
@@ -536,21 +536,6 @@ func (o *Options) GetIteratorSamplingRate() int {
 	return o.IteratorSamplingRate
 }
 
-func (o *Options) GetMaxMemCompationLevel() int {
-	level := DefaultMaxMemCompationLevel
-	if o != nil {
-		if o.MaxMemCompationLevel > 0 {
-			level = o.MaxMemCompationLevel
-		} else if o.MaxMemCompationLevel < 0 {
-			level = 0
-		}
-	}
-	if level >= o.GetNumLevel() {
-		return o.GetNumLevel() - 1
-	}
-	return level
-}
-
 func (o *Options) GetNoSync() bool {
 	if o == nil {
 		return false
@@ -558,13 +543,6 @@ func (o *Options) GetNoSync() bool {
 	return o.NoSync
 }
 
-func (o *Options) GetNumLevel() int {
-	if o == nil || o.NumLevel <= 0 {
-		return DefaultNumLevel
-	}
-	return o.NumLevel
-}
-
 func (o *Options) GetOpenFilesCacher() Cacher {
 	if o == nil || o.OpenFilesCacher == nil {
 		return DefaultOpenFilesCacher
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/options.go
similarity index 67%
rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/options.go
index a3d84ef..b072b1a 100644
---
a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/options.go @@ -43,6 +43,8 @@ func (s *session) setOptions(o *opt.Options) { s.o.cache() } +const optCachedLevel = 7 + type cachedOptions struct { *opt.Options @@ -54,15 +56,13 @@ type cachedOptions struct { } func (co *cachedOptions) cache() { - numLevel := co.Options.GetNumLevel() + co.compactionExpandLimit = make([]int, optCachedLevel) + co.compactionGPOverlaps = make([]int, optCachedLevel) + co.compactionSourceLimit = make([]int, optCachedLevel) + co.compactionTableSize = make([]int, optCachedLevel) + co.compactionTotalSize = make([]int64, optCachedLevel) - co.compactionExpandLimit = make([]int, numLevel) - co.compactionGPOverlaps = make([]int, numLevel) - co.compactionSourceLimit = make([]int, numLevel) - co.compactionTableSize = make([]int, numLevel) - co.compactionTotalSize = make([]int64, numLevel) - - for level := 0; level < numLevel; level++ { + for level := 0; level < optCachedLevel; level++ { co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) @@ -72,21 +72,36 @@ func (co *cachedOptions) cache() { } func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - return co.compactionExpandLimit[level] + if level < optCachedLevel { + return co.compactionExpandLimit[level] + } + return co.Options.GetCompactionExpandLimit(level) } func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - return co.compactionGPOverlaps[level] + if level < optCachedLevel { + return co.compactionGPOverlaps[level] + } + return co.Options.GetCompactionGPOverlaps(level) } func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - return co.compactionSourceLimit[level] + if level < optCachedLevel { + return co.compactionSourceLimit[level] + } + return co.Options.GetCompactionSourceLimit(level) } func (co *cachedOptions) GetCompactionTableSize(level int) int { - return co.compactionTableSize[level] + if level < optCachedLevel { + return co.compactionTableSize[level] + } + return co.Options.GetCompactionTableSize(level) } func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - return co.compactionTotalSize[level] + if level < optCachedLevel { + return co.compactionTotalSize[level] + } + return co.Options.GetCompactionTotalSize(level) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session.go similarity index 67% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/session.go index f0bba46..b0d3fef 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session.go @@ -16,9 +16,9 @@ import ( "github.com/syndtr/goleveldb/leveldb/journal" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" ) +// ErrManifestCorrupted records manifest corruption. 
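The cachedOptions change above replaces the NumLevel-sized slices with fixed optCachedLevel-sized ones and falls back to the uncached getter for deeper levels, so lookups stay O(1) for the common levels without capping the level count. A standalone sketch of the same precompute-with-fallback pattern (the size formula and numbers are illustrative, not the library's defaults):

package main

import (
	"fmt"
	"math"
)

const cachedLevels = 7

// totalSize mimics an uncached per-level getter: a base size times a
// multiplier raised to the level.
func totalSize(level int) int64 {
	return int64(10 * 1024 * 1024 * math.Pow(10, float64(level)))
}

type opts struct{ cached []int64 }

func newOpts() *opts {
	o := &opts{cached: make([]int64, cachedLevels)}
	for l := 0; l < cachedLevels; l++ {
		o.cached[l] = totalSize(l) // precompute the common levels
	}
	return o
}

// TotalSize serves cached levels from the slice and computes deeper
// levels on demand, like cachedOptions.GetCompactionTotalSize above.
func (o *opts) TotalSize(level int) int64 {
	if level < cachedLevels {
		return o.cached[level]
	}
	return totalSize(level)
}

func main() {
	o := newOpts()
	fmt.Println(o.TotalSize(2), o.TotalSize(9)) // level 9 bypasses the cache
}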
type ErrManifestCorrupted struct { Field string Reason string @@ -28,31 +28,31 @@ func (e *ErrManifestCorrupted) Error() string { return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) } -func newErrManifestCorrupted(f storage.File, field, reason string) error { - return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason}) +func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { + return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) } // session represent a persistent database session. type session struct { // Need 64-bit alignment. - stNextFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stNextFileNum int64 // current unused file number + stJournalNum int64 // current journal file number; need external synchronization + stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stTempFileNum int64 stSeqNum uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 stor storage.Storage - storLock util.Releaser + storLock storage.Lock o *cachedOptions icmp *iComparer tops *tOps manifest *journal.Writer manifestWriter storage.Writer - manifestFile storage.File + manifestFd storage.FileDesc - stCompPtrs []iKey // compaction pointers; need external synchronization - stVersion *version // current version + stCompPtrs []internalKey // compaction pointers; need external synchronization + stVersion *version // current version vmu sync.Mutex } @@ -66,9 +66,8 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { return } s = &session{ - stor: stor, - storLock: storLock, - stCompPtrs: make([]iKey, o.GetNumLevel()), + stor: stor, + storLock: storLock, } s.setOptions(o) s.tops = newTableOps(s) @@ -88,7 +87,6 @@ func (s *session) close() { } s.manifest = nil s.manifestWriter = nil - s.manifestFile = nil s.stVersion = nil } @@ -109,18 +107,18 @@ func (s *session) recover() (err error) { if os.IsNotExist(err) { // Don't return os.ErrNotExist if the underlying storage contains // other files that belong to LevelDB. So the DB won't get trashed. - if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} + if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { + err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} } } }() - m, err := s.stor.GetManifest() + fd, err := s.stor.GetMeta() if err != nil { return } - reader, err := m.Open() + reader, err := s.stor.Open(fd) if err != nil { return } @@ -128,10 +126,9 @@ func (s *session) recover() (err error) { var ( // Options. 
- numLevel = s.o.GetNumLevel() - strict = s.o.GetStrict(opt.StrictManifest) + strict = s.o.GetStrict(opt.StrictManifest) - jr = journal.NewReader(reader, dropper{s, m}, strict, true) + jr = journal.NewReader(reader, dropper{s, fd}, strict, true) rec = &sessionRecord{} staging = s.stVersion.newStaging() ) @@ -143,24 +140,23 @@ func (s *session) recover() (err error) { err = nil break } - return errors.SetFile(err, m) + return errors.SetFd(err, fd) } - err = rec.decode(r, numLevel) + err = rec.decode(r) if err == nil { // save compact pointers for _, r := range rec.compPtrs { - s.stCompPtrs[r.level] = iKey(r.ikey) + s.setCompPtr(r.level, internalKey(r.ikey)) } // commit record to version staging staging.commit(rec) } else { - err = errors.SetFile(err, m) + err = errors.SetFd(err, fd) if strict || !errors.IsCorrupted(err) { return - } else { - s.logf("manifest error: %v (skipped)", errors.SetFile(err, m)) } + s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) } rec.resetCompPtrs() rec.resetAddedTables() @@ -169,18 +165,18 @@ func (s *session) recover() (err error) { switch { case !rec.has(recComparer): - return newErrManifestCorrupted(m, "comparer", "missing") + return newErrManifestCorrupted(fd, "comparer", "missing") case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) + return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) case !rec.has(recNextFileNum): - return newErrManifestCorrupted(m, "next-file-num", "missing") + return newErrManifestCorrupted(fd, "next-file-num", "missing") case !rec.has(recJournalNum): - return newErrManifestCorrupted(m, "journal-file-num", "missing") + return newErrManifestCorrupted(fd, "journal-file-num", "missing") case !rec.has(recSeqNum): - return newErrManifestCorrupted(m, "seq-num", "missing") + return newErrManifestCorrupted(fd, "seq-num", "missing") } - s.manifestFile = m + s.manifestFd = fd s.setVersion(staging.finish()) s.setNextFileNum(rec.nextFileNum) s.recordCommited(rec) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go similarity index 63% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go index 7c5a794..089cd00 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go @@ -14,41 +14,46 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" ) -func (s *session) pickMemdbLevel(umin, umax []byte) int { +func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { v := s.version() defer v.release() - return v.pickMemdbLevel(umin, umax) + return v.pickMemdbLevel(umin, umax, maxLevel) } -func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) { +func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { // Create sorted table. iter := mdb.NewIterator(nil) defer iter.Release() t, n, err := s.tops.createFrom(iter) if err != nil { - return level, err + return 0, err } - // Pick level and add to record. 
- if level < 0 { - level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey()) - } - rec.addTableFile(level, t) + // Picking a level other than zero can cause a compaction issue with + // large bulk inserts and deletes on a strictly incrementing key-space: + // small deletion markers get trapped at a lower level while key/value + // entries keep growing at a higher level. Since the key-space is + // strictly incrementing it does not overlap with higher levels, so the + // maximum possible level is always picked and the overlapping deletion + // markers are pushed into a lower level. + // See: https://github.com/syndtr/goleveldb/issues/127. + flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) + rec.addTableFile(flushLevel, t) - s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - return level, nil + s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + return flushLevel, nil } // Pick a compaction based on current state; need external synchronization. func (s *session) pickCompaction() *compaction { v := s.version() - var level int + var sourceLevel int var t0 tFiles if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCompPtrs[level] - tables := v.tables[level] + sourceLevel = v.cLevel + cptr := s.getCompPtr(sourceLevel) + tables := v.levels[sourceLevel] for _, t := range tables { if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { t0 = append(t0, t) @@ -61,7 +66,7 @@ } else { if p := atomic.LoadPointer(&v.cSeek); p != nil { ts := (*tSet)(p) - level = ts.level + sourceLevel = ts.level t0 = append(t0, ts.table) } else { v.release() @@ -69,14 +74,19 @@ } } - return newCompaction(s, v, level, t0) + return newCompaction(s, v, sourceLevel, t0) } // Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { +func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { v := s.version() - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) + if sourceLevel >= len(v.levels) { + v.release() + return nil + } + + t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) if len(t0) == 0 { v.release() return nil @@ -86,9 +96,9 @@ func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { // But we cannot do this for level-0 since level-0 files can overlap // and we must not pick one file and drop another older file if the // two files overlap.
- if level > 0 { - limit := uint64(v.s.o.GetCompactionSourceLimit(level)) - total := uint64(0) + if !noLimit && sourceLevel > 0 { + limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) + total := int64(0) for i, t := range t0 { total += t.size if total >= limit { @@ -99,17 +109,17 @@ func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { } } - return newCompaction(s, v, level, t0) + return newCompaction(s, v, sourceLevel, t0) } -func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction { +func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { c := &compaction{ s: s, v: v, - level: level, - tables: [2]tFiles{t0, nil}, - maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)), - tPtrs: make([]int, s.o.GetNumLevel()), + sourceLevel: sourceLevel, + levels: [2]tFiles{t0, nil}, + maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), + tPtrs: make([]int, len(v.levels)), } c.expand() c.save() @@ -121,21 +131,21 @@ type compaction struct { s *session v *version - level int - tables [2]tFiles - maxGPOverlaps uint64 + sourceLevel int + levels [2]tFiles + maxGPOverlaps int64 gp tFiles gpi int seenKey bool - gpOverlappedBytes uint64 - imin, imax iKey + gpOverlappedBytes int64 + imin, imax internalKey tPtrs []int released bool snapGPI int snapSeenKey bool - snapGPOverlappedBytes uint64 + snapGPOverlappedBytes int64 snapTPtrs []int } @@ -162,30 +172,34 @@ func (c *compaction) release() { // Expand compacted tables; need external synchronization. func (c *compaction) expand() { - limit := uint64(c.s.o.GetCompactionExpandLimit(c.level)) - vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1] + limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) + vt0 := c.v.levels[c.sourceLevel] + vt1 := tFiles{} + if level := c.sourceLevel + 1; level < len(c.v.levels) { + vt1 = c.v.levels[level] + } - t0, t1 := c.tables[0], c.tables[1] + t0, t1 := c.levels[0], c.levels[1] imin, imax := t0.getRange(c.s.icmp) // We expand t0 here just incase ukey hop across tables. - t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0) - if len(t0) != len(c.tables[0]) { + t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) + if len(t0) != len(c.levels[0]) { imin, imax = t0.getRange(c.s.icmp) } t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) // Get entire range covered by compaction. amin, amax := append(t0, t1...).getRange(c.s.icmp) - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. + // See if we can grow the number of inputs in "sourceLevel" without + // changing the number of "sourceLevel+1" files we pick up. 
if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0) + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { xmin, xmax := exp0.getRange(c.s.icmp) exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) if len(exp1) == len(t1) { c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) imin, imax = xmin, xmax t0, t1 = exp0, exp1 @@ -195,22 +209,23 @@ func (c *compaction) expand() { } // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if c.level+2 < c.s.o.GetNumLevel() { - c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + // (parent == sourceLevel+1; grandparent == sourceLevel+2) + if level := c.sourceLevel + 2; level < len(c.v.levels) { + c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) } - c.tables[0], c.tables[1] = t0, t1 + c.levels[0], c.levels[1] = t0, t1 c.imin, c.imax = imin, imax } // Check whether compaction is trivial. func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps + return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps } func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { + for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { + tables := c.v.levels[level] for c.tPtrs[level] < len(tables) { t := tables[c.tPtrs[level]] if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { @@ -227,7 +242,7 @@ func (c *compaction) baseLevelForKey(ukey []byte) bool { return true } -func (c *compaction) shouldStopBefore(ikey iKey) bool { +func (c *compaction) shouldStopBefore(ikey internalKey) bool { for ; c.gpi < len(c.gp); c.gpi++ { gp := c.gp[c.gpi] if c.s.icmp.Compare(ikey, gp.imax) <= 0 { @@ -250,10 +265,10 @@ func (c *compaction) shouldStopBefore(ikey iKey) bool { // Creates an iterator. func (c *compaction) newIterator() iterator.Iterator { // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { + icap := len(c.levels) + if c.sourceLevel == 0 { // Special case for level-0. - icap = len(c.tables[0]) + 1 + icap = len(c.levels[0]) + 1 } its := make([]iterator.Iterator, 0, icap) @@ -267,13 +282,13 @@ func (c *compaction) newIterator() iterator.Iterator { ro.Strict |= opt.StrictReader } - for i, tables := range c.tables { + for i, tables := range c.levels { if len(tables) == 0 { continue } // Level-0 is not sorted and may overlaps each other. 
- if c.level+i == 0 { + if c.sourceLevel+i == 0 { for _, t := range tables { its = append(its, c.s.tops.newIterator(t, nil, ro)) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go similarity index 72% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go index 405e07b..854e1aa 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go @@ -13,6 +13,7 @@ import ( "strings" "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" ) type byteReader interface { @@ -35,28 +36,28 @@ const ( type cpRecord struct { level int - ikey iKey + ikey internalKey } type atRecord struct { level int - num uint64 - size uint64 - imin iKey - imax iKey + num int64 + size int64 + imin internalKey + imax internalKey } type dtRecord struct { level int - num uint64 + num int64 } type sessionRecord struct { hasRec int comparer string - journalNum uint64 - prevJournalNum uint64 - nextFileNum uint64 + journalNum int64 + prevJournalNum int64 + nextFileNum int64 seqNum uint64 compPtrs []cpRecord addedTables []atRecord @@ -75,17 +76,17 @@ func (p *sessionRecord) setComparer(name string) { p.comparer = name } -func (p *sessionRecord) setJournalNum(num uint64) { +func (p *sessionRecord) setJournalNum(num int64) { p.hasRec |= 1 << recJournalNum p.journalNum = num } -func (p *sessionRecord) setPrevJournalNum(num uint64) { +func (p *sessionRecord) setPrevJournalNum(num int64) { p.hasRec |= 1 << recPrevJournalNum p.prevJournalNum = num } -func (p *sessionRecord) setNextFileNum(num uint64) { +func (p *sessionRecord) setNextFileNum(num int64) { p.hasRec |= 1 << recNextFileNum p.nextFileNum = num } @@ -95,7 +96,7 @@ func (p *sessionRecord) setSeqNum(num uint64) { p.seqNum = num } -func (p *sessionRecord) addCompPtr(level int, ikey iKey) { +func (p *sessionRecord) addCompPtr(level int, ikey internalKey) { p.hasRec |= 1 << recCompPtr p.compPtrs = append(p.compPtrs, cpRecord{level, ikey}) } @@ -105,13 +106,13 @@ func (p *sessionRecord) resetCompPtrs() { p.compPtrs = p.compPtrs[:0] } -func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) { +func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) { p.hasRec |= 1 << recAddTable p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax}) } func (p *sessionRecord) addTableFile(level int, t *tFile) { - p.addTable(level, t.file.Num(), t.size, t.imin, t.imax) + p.addTable(level, t.fd.Num, t.size, t.imin, t.imax) } func (p *sessionRecord) resetAddedTables() { @@ -119,7 +120,7 @@ func (p *sessionRecord) resetAddedTables() { p.addedTables = p.addedTables[:0] } -func (p *sessionRecord) delTable(level int, num uint64) { +func (p *sessionRecord) delTable(level int, num int64) { p.hasRec |= 1 << recDelTable p.deletedTables = append(p.deletedTables, dtRecord{level, num}) } @@ -137,6 +138,13 @@ func (p *sessionRecord) putUvarint(w io.Writer, x uint64) { _, p.err = w.Write(p.scratch[:n]) } +func (p *sessionRecord) putVarint(w io.Writer, x int64) { + if x < 0 { + panic("invalid negative value") + } + p.putUvarint(w, uint64(x)) +} + func (p *sessionRecord) putBytes(w io.Writer, x []byte) { if p.err != nil { return @@ -156,11 +164,11 @@ func (p *sessionRecord) encode(w io.Writer) error 
{ } if p.has(recJournalNum) { p.putUvarint(w, recJournalNum) - p.putUvarint(w, p.journalNum) + p.putVarint(w, p.journalNum) } if p.has(recNextFileNum) { p.putUvarint(w, recNextFileNum) - p.putUvarint(w, p.nextFileNum) + p.putVarint(w, p.nextFileNum) } if p.has(recSeqNum) { p.putUvarint(w, recSeqNum) @@ -174,13 +182,13 @@ func (p *sessionRecord) encode(w io.Writer) error { for _, r := range p.deletedTables { p.putUvarint(w, recDelTable) p.putUvarint(w, uint64(r.level)) - p.putUvarint(w, r.num) + p.putVarint(w, r.num) } for _, r := range p.addedTables { p.putUvarint(w, recAddTable) p.putUvarint(w, uint64(r.level)) - p.putUvarint(w, r.num) - p.putUvarint(w, r.size) + p.putVarint(w, r.num) + p.putVarint(w, r.size) p.putBytes(w, r.imin) p.putBytes(w, r.imax) } @@ -194,9 +202,9 @@ func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF x, err := binary.ReadUvarint(r) if err != nil { if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"}) + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"}) } else if strings.HasPrefix(err.Error(), "binary:") { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()}) + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, err.Error()}) } else { p.err = err } @@ -209,6 +217,14 @@ func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 { return p.readUvarintMayEOF(field, r, false) } +func (p *sessionRecord) readVarint(field string, r io.ByteReader) int64 { + x := int64(p.readUvarintMayEOF(field, r, false)) + if x < 0 { + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "invalid negative value"}) + } + return x +} + func (p *sessionRecord) readBytes(field string, r byteReader) []byte { if p.err != nil { return nil @@ -221,14 +237,14 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte { _, p.err = io.ReadFull(r, x) if p.err != nil { if p.err == io.ErrUnexpectedEOF { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"}) + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"}) } return nil } return x } -func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int { +func (p *sessionRecord) readLevel(field string, r io.ByteReader) int { if p.err != nil { return 0 } @@ -236,14 +252,10 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) i if p.err != nil { return 0 } - if x >= uint64(numLevel) { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"}) - return 0 - } return int(x) } -func (p *sessionRecord) decode(r io.Reader, numLevel int) error { +func (p *sessionRecord) decode(r io.Reader) error { br, ok := r.(byteReader) if !ok { br = bufio.NewReader(r) @@ -264,17 +276,17 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error { p.setComparer(string(x)) } case recJournalNum: - x := p.readUvarint("journal-num", br) + x := p.readVarint("journal-num", br) if p.err == nil { p.setJournalNum(x) } case recPrevJournalNum: - x := p.readUvarint("prev-journal-num", br) + x := p.readVarint("prev-journal-num", br) if p.err == nil { p.setPrevJournalNum(x) } case recNextFileNum: - x := p.readUvarint("next-file-num", br) + x := p.readVarint("next-file-num", br) if p.err == nil { p.setNextFileNum(x) } @@ -284,23 +296,23 @@ func (p *sessionRecord) 
decode(r io.Reader, numLevel int) error { p.setSeqNum(x) } case recCompPtr: - level := p.readLevel("comp-ptr.level", br, numLevel) + level := p.readLevel("comp-ptr.level", br) ikey := p.readBytes("comp-ptr.ikey", br) if p.err == nil { - p.addCompPtr(level, iKey(ikey)) + p.addCompPtr(level, internalKey(ikey)) } case recAddTable: - level := p.readLevel("add-table.level", br, numLevel) - num := p.readUvarint("add-table.num", br) - size := p.readUvarint("add-table.size", br) + level := p.readLevel("add-table.level", br) + num := p.readVarint("add-table.num", br) + size := p.readVarint("add-table.size", br) imin := p.readBytes("add-table.imin", br) imax := p.readBytes("add-table.imax", br) if p.err == nil { p.addTable(level, num, size, imin, imax) } case recDelTable: - level := p.readLevel("del-table.level", br, numLevel) - num := p.readUvarint("del-table.num", br) + level := p.readLevel("del-table.level", br) + num := p.readVarint("del-table.num", br) if p.err == nil { p.delTable(level, num) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go similarity index 63% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go index 7ec9f86..674182f 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go @@ -17,15 +17,15 @@ import ( // Logging. type dropper struct { - s *session - file storage.File + s *session + fd storage.FileDesc } func (d dropper) Drop(err error) { if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) + d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason) } else { - d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) + d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err) } } @@ -34,25 +34,9 @@ func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf // File utils. -func (s *session) getJournalFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeJournal) -} - -func (s *session) getTableFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeTable) -} - -func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { - return s.stor.GetFiles(t) -} - -func (s *session) newTemp() storage.File { - num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 - return s.stor.GetFile(num, storage.TypeTemp) -} - -func (s *session) tableFileFromRecord(r atRecord) *tFile { - return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) +func (s *session) newTemp() storage.FileDesc { + num := atomic.AddInt64(&s.stTempFileNum, 1) - 1 + return storage.FileDesc{storage.TypeTemp, num} } // Session state. @@ -80,47 +64,65 @@ func (s *session) setVersion(v *version) { } // Get current unused file number. -func (s *session) nextFileNum() uint64 { - return atomic.LoadUint64(&s.stNextFileNum) +func (s *session) nextFileNum() int64 { + return atomic.LoadInt64(&s.stNextFileNum) } // Set current unused file number to num. -func (s *session) setNextFileNum(num uint64) { - atomic.StoreUint64(&s.stNextFileNum, num) +func (s *session) setNextFileNum(num int64) { + atomic.StoreInt64(&s.stNextFileNum, num) } // Mark file number as used. 
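The session_record.go hunks above move file numbers from uint64 to int64 while keeping the uvarint wire format; negative values are rejected at both ends (a panic on encode, a corruption error on decode). A standalone sketch of that round trip using only the standard library:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// putVarint writes a non-negative int64 as a uvarint, matching the
// behaviour of the patch's putVarint, which panics on negative input.
func putVarint(w *bytes.Buffer, x int64) {
	if x < 0 {
		panic("invalid negative value")
	}
	var scratch [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(scratch[:], uint64(x))
	w.Write(scratch[:n])
}

// readVarint reads the value back, treating anything that overflows
// int64 as corruption, like the patch's readVarint.
func readVarint(r *bytes.Buffer) (int64, error) {
	u, err := binary.ReadUvarint(r)
	if err != nil {
		return 0, err
	}
	x := int64(u)
	if x < 0 {
		return 0, fmt.Errorf("invalid negative value")
	}
	return x, nil
}

func main() {
	var buf bytes.Buffer
	putVarint(&buf, 12345) // e.g. a journal file number
	x, err := readVarint(&buf)
	fmt.Println(x, err) // 12345 <nil>
}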
-func (s *session) markFileNum(num uint64) { +func (s *session) markFileNum(num int64) { nextFileNum := num + 1 for { old, x := s.stNextFileNum, nextFileNum if old > x { x = old } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { break } } } // Allocate a file number. -func (s *session) allocFileNum() uint64 { - return atomic.AddUint64(&s.stNextFileNum, 1) - 1 +func (s *session) allocFileNum() int64 { + return atomic.AddInt64(&s.stNextFileNum, 1) - 1 } // Reuse given file number. -func (s *session) reuseFileNum(num uint64) { +func (s *session) reuseFileNum(num int64) { for { old, x := s.stNextFileNum, num if old != x+1 { x = old } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { break } } } +// Set compaction ptr at given level; need external synchronization. +func (s *session) setCompPtr(level int, ik internalKey) { + if level >= len(s.stCompPtrs) { + newCompPtrs := make([]internalKey, level+1) + copy(newCompPtrs, s.stCompPtrs) + s.stCompPtrs = newCompPtrs + } + s.stCompPtrs[level] = append(internalKey{}, ik...) +} + +// Get compaction ptr at given level; need external synchronization. +func (s *session) getCompPtr(level int) internalKey { + if level >= len(s.stCompPtrs) { + return nil + } + return s.stCompPtrs[level] +} + // Manifest related utils. // Fill given session record obj with current states; need external @@ -149,29 +151,28 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) { // Mark if record has been committed, this will update session state; // need external synchronization. -func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum +func (s *session) recordCommited(rec *sessionRecord) { + if rec.has(recJournalNum) { + s.stJournalNum = rec.journalNum } - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum + if rec.has(recPrevJournalNum) { + s.stPrevJournalNum = rec.prevJournalNum } - if r.has(recSeqNum) { - s.stSeqNum = r.seqNum + if rec.has(recSeqNum) { + s.stSeqNum = rec.seqNum } - for _, p := range r.compPtrs { - s.stCompPtrs[p.level] = iKey(p.ikey) + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) } } // Create a new manifest file; need external synchronization. 
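Because NumLevel is gone, the compaction-pointer slice above can no longer be pre-sized; setCompPtr grows it on demand and getCompPtr treats out-of-range levels as unset. A standalone sketch of that grow-on-write pattern:

package main

import "fmt"

type key []byte

type state struct{ ptrs []key }

// set grows the slice to fit level, copying the old entries, as
// session.setCompPtr does in the hunk above.
func (s *state) set(level int, k key) {
	if level >= len(s.ptrs) {
		grown := make([]key, level+1)
		copy(grown, s.ptrs)
		s.ptrs = grown
	}
	s.ptrs[level] = append(key{}, k...) // keep a private copy
}

// get returns nil for levels that were never written, as
// session.getCompPtr does.
func (s *state) get(level int) key {
	if level >= len(s.ptrs) {
		return nil
	}
	return s.ptrs[level]
}

func main() {
	var s state
	s.set(9, key("ikey-at-L9"))                      // slice grows to 10 entries
	fmt.Printf("%s %v\n", s.get(9), s.get(3) == nil) // ikey-at-L9 true
}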
func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() + fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()} + writer, err := s.stor.Create(fd) if err != nil { return } @@ -196,16 +197,16 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { if s.manifestWriter != nil { s.manifestWriter.Close() } - if s.manifestFile != nil { - s.manifestFile.Remove() + if !s.manifestFd.Nil() { + s.stor.Remove(s.manifestFd) } - s.manifestFile = file + s.manifestFd = fd s.manifestWriter = writer s.manifest = jw } else { writer.Close() - file.Remove() - s.reuseFileNum(num) + s.stor.Remove(fd) + s.reuseFileNum(fd.Num) } }() @@ -221,7 +222,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { if err != nil { return } - err = s.stor.SetManifest(file) + err = s.stor.SetMeta(fd) return } diff --git a/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go new file mode 100644 index 0000000..cbe1dc1 --- /dev/null +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -0,0 +1,583 @@ +// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +var ( + errFileOpen = errors.New("leveldb/storage: file still open") + errReadOnly = errors.New("leveldb/storage: storage is read-only") +) + +type fileLock interface { + release() error +} + +type fileStorageLock struct { + fs *fileStorage +} + +func (lock *fileStorageLock) Release() { + if lock.fs != nil { + lock.fs.mu.Lock() + defer lock.fs.mu.Unlock() + if lock.fs.slock == lock { + lock.fs.slock = nil + } + } +} + +const logSizeThreshold = 1024 * 1024 // 1 MiB + +// fileStorage is a file-system backed storage. +type fileStorage struct { + path string + readOnly bool + + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + logSize int64 + buf []byte + // Opened file counter; if open < 0 means closed. + open int + day int +} + +// OpenFile returns a new filesystem-backed storage implementation with the given +// path. This also acquires a file lock, so any subsequent attempt to open the +// same path will fail. +// +// The storage must be closed after use, by calling the Close method.
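For orientation, a hedged usage sketch of the reworked, FileDesc-based storage API (the "testdb" path and file number are illustrative; error handling is abbreviated); OpenFile itself follows:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// Open (and lock) a storage directory; false = not read-only.
	stor, err := storage.OpenFile("testdb", false)
	if err != nil {
		panic(err)
	}
	defer stor.Close()

	// Files are now addressed by the FileDesc value type instead of a
	// stateful File interface object.
	fd := storage.FileDesc{Type: storage.TypeJournal, Num: 1}
	w, err := stor.Create(fd)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("journal record"))
	w.Close()

	fds, _ := stor.List(storage.TypeAll)
	fmt.Println(fds) // [000001.log]
}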
+func OpenFile(path string, readOnly bool) (Storage, error) { + if fi, err := os.Stat(path); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) + } + } else if os.IsNotExist(err) && !readOnly { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + } else { + return nil, err + } + + flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + flock.release() + } + }() + + var ( + logw *os.File + logSize int64 + ) + if !readOnly { + logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + logSize, err = logw.Seek(0, os.SEEK_END) + if err != nil { + logw.Close() + return nil, err + } + } + + fs := &fileStorage{ + path: path, + readOnly: readOnly, + flock: flock, + logw: logw, + logSize: logSize, + } + runtime.SetFinalizer(fs, (*fileStorage).Close) + return fs, nil +} + +func (fs *fileStorage) Lock() (Lock, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + if fs.readOnly { + return &fileStorageLock{}, nil + } + if fs.slock != nil { + return nil, ErrLocked + } + fs.slock = &fileStorageLock{fs: fs} + return fs.slock, nil +} + +func itoa(buf []byte, i int, wid int) []byte { + u := uint(i) + if u == 0 && wid <= 1 { + return append(buf, '0') + } + + // Assemble decimal in reverse order. + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + return append(buf, b[bp:]...) +} + +func (fs *fileStorage) printDay(t time.Time) { + if fs.day == t.Day() { + return + } + fs.day = t.Day() + fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) +} + +func (fs *fileStorage) doLog(t time.Time, str string) { + if fs.logSize > logSizeThreshold { + // Rotate log file. + fs.logw.Close() + fs.logw = nil + fs.logSize = 0 + rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) + } + if fs.logw == nil { + var err error + fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return + } + // Force printDay on new log file. + fs.day = 0 + } + fs.printDay(t) + hour, min, sec := t.Clock() + msec := t.Nanosecond() / 1e3 + // time + fs.buf = itoa(fs.buf[:0], hour, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, min, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, sec, 2) + fs.buf = append(fs.buf, '.') + fs.buf = itoa(fs.buf, msec, 6) + fs.buf = append(fs.buf, ' ') + // write + fs.buf = append(fs.buf, []byte(str)...) 
+ fs.buf = append(fs.buf, '\n') + fs.logw.Write(fs.buf) +} + +func (fs *fileStorage) Log(str string) { + if !fs.readOnly { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) + } +} + +func (fs *fileStorage) log(str string) { + if !fs.readOnly { + fs.doLog(time.Now(), str) + } +} + +func (fs *fileStorage) SetMeta(fd FileDesc) (err error) { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + defer func() { + if err != nil { + fs.log(fmt.Sprintf("CURRENT: %v", err)) + } + }() + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return + } + _, err = fmt.Fprintln(w, fsGenName(fd)) + // Close the file first. + if cerr := w.Close(); cerr != nil { + fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, cerr)) + } + if err != nil { + return + } + return rename(path, filepath.Join(fs.path, "CURRENT")) +} + +func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return FileDesc{}, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if ce := dir.Close(); ce != nil { + fs.log(fmt.Sprintf("close dir: %v", ce)) + } + if err != nil { + return + } + // Find latest CURRENT file. + var rem []string + var pend bool + var cerr error + for _, name := range names { + if strings.HasPrefix(name, "CURRENT") { + pend1 := len(name) > 7 + var pendNum int64 + // Make sure it is valid name for a CURRENT file, otherwise skip it. + if pend1 { + if name[7] != '.' || len(name) < 9 { + fs.log(fmt.Sprintf("skipping %s: invalid file name", name)) + continue + } + var e1 error + if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil { + fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1)) + continue + } + } + path := filepath.Join(fs.path, name) + r, e1 := os.OpenFile(path, os.O_RDONLY, 0) + if e1 != nil { + return FileDesc{}, e1 + } + b, e1 := ioutil.ReadAll(r) + if e1 != nil { + r.Close() + return FileDesc{}, e1 + } + var fd1 FileDesc + if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) { + fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name)) + if pend1 { + rem = append(rem, name) + } + if !pend1 || cerr == nil { + metaFd, _ := fsParseName(name) + cerr = &ErrCorrupted{ + Fd: metaFd, + Err: errors.New("leveldb/storage: corrupted or incomplete meta file"), + } + } + } else if pend1 && pendNum != fd1.Num { + fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num)) + rem = append(rem, name) + } else if fd1.Num < fd.Num { + fs.log(fmt.Sprintf("skipping %s: obsolete", name)) + if pend1 { + rem = append(rem, name) + } + } else { + fd = fd1 + pend = pend1 + } + if err := r.Close(); err != nil { + fs.log(fmt.Sprintf("close %s: %v", name, err)) + } + } + } + // Don't remove any files if there is no valid CURRENT file. + if fd.Nil() { + if cerr != nil { + err = cerr + } else { + err = os.ErrNotExist + } + return + } + if !fs.readOnly { + // Rename pending CURRENT file to an effective CURRENT. 
+ if pend { + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { + fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err)) + } + } + // Remove obsolete or incomplete pending CURRENT files. + for _, name := range rem { + path := filepath.Join(fs.path, name) + if err := os.Remove(path); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", name, err)) + } + } + } + return +} + +func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if cerr := dir.Close(); cerr != nil { + fs.log(fmt.Sprintf("close dir: %v", cerr)) + } + if err == nil { + for _, name := range names { + if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { + fds = append(fds, fd) + } + } + } + return +} + +func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err + } +ok: + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + if fs.readOnly { + return nil, errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, err + } + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { + fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) + err = e1 + } + } else { + fs.log(fmt.Sprintf("remove %s: %v", fd, err)) + } + } + return err +} + +func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { + return ErrInvalidFile + } + if oldfd == newfd { + return nil + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) +} + +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + // Clear the finalizer. 
+ runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) + } + fs.open = -1 + if fs.logw != nil { + fs.logw.Close() + } + return fs.flock.release() +} + +type fileWrap struct { + *os.File + fs *fileStorage + fd FileDesc + closed bool +} + +func (fw *fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.fd.Type == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. + if err := syncDir(fw.fs.path); err != nil { + fw.fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + } + return nil +} + +func (fw *fileWrap) Close() error { + fw.fs.mu.Lock() + defer fw.fs.mu.Unlock() + if fw.closed { + return ErrClosed + } + fw.closed = true + fw.fs.open-- + err := fw.File.Close() + if err != nil { + fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) + } + return err +} + +func fsGenName(fd FileDesc) string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + panic("invalid file type") + } +} + +func fsHasOldName(fd FileDesc) bool { + return fd.Type == TypeTable +} + +func fsGenOldName(fd FileDesc) string { + switch fd.Type { + case TypeTable: + return fmt.Sprintf("%06d.sst", fd.Num) + } + return fsGenName(fd) +} + +func fsParseName(name string) (fd FileDesc, ok bool) { + var tail string + _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) + if err == nil { + switch tail { + case "log": + fd.Type = TypeJournal + case "ldb", "sst": + fd.Type = TypeTable + case "tmp": + fd.Type = TypeTemp + default: + return + } + return fd, true + } + n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) + if n == 1 { + fd.Type = TypeManifest + return fd, true + } + return +} + +func fsParseNamePtr(name string, fd *FileDesc) bool { + _fd, ok := fsParseName(name) + if fd != nil { + *fd = _fd + } + return ok +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go similarity index 71% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go index 42940d7..bab62bf 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go @@ -19,8 +19,21 @@ func (fl *plan9FileLock) release() error { return fl.f.Close() } -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var ( + flag int + perm os.FileMode + ) + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + perm = os.ModeExclusive + } + f, err := os.OpenFile(path, flag, perm) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) + } if err != nil { return } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go similarity index 63% rename from 
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go index 102031b..79901ee 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go @@ -18,18 +18,27 @@ type unixFileLock struct { } func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { + if err := setFileLock(fl.f, false, false); err != nil { return err } return fl.f.Close() } -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } if err != nil { return } - err = setFileLock(f, true) + err = setFileLock(f, readOnly, true) if err != nil { f.Close() return @@ -38,7 +47,7 @@ func newFileLock(path string) (fl fileLock, err error) { return } -func setFileLock(f *os.File, lock bool) error { +func setFileLock(f *os.File, readOnly, lock bool) error { flock := syscall.Flock_t{ Type: syscall.F_UNLCK, Start: 0, @@ -46,7 +55,11 @@ func setFileLock(f *os.File, lock bool) error { Whence: 1, } if lock { - flock.Type = syscall.F_WRLCK + if readOnly { + flock.Type = syscall.F_RDLCK + } else { + flock.Type = syscall.F_WRLCK + } } return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go similarity index 68% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go index 6eb3274..7e29915 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -18,18 +18,27 @@ type unixFileLock struct { } func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { + if err := setFileLock(fl.f, false, false); err != nil { return err } return fl.f.Close() } -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } if err != nil { return } - err = setFileLock(f, true) + err = setFileLock(f, readOnly, true) if err != nil { f.Close() return @@ -38,10 +47,14 @@ func newFileLock(path string) (fl fileLock, err error) { return } -func setFileLock(f *os.File, lock bool) error { +func setFileLock(f *os.File, readOnly, lock bool) error { how := syscall.LOCK_UN if lock { - how = syscall.LOCK_EX + if readOnly { + how = syscall.LOCK_SH + } else { + how = syscall.LOCK_EX + } } return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go 
b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go similarity index 69% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go index 50c3c45..899335f 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go @@ -29,12 +29,22 @@ func (fl *windowsFileLock) release() error { return syscall.Close(fl.fd) } -func newFileLock(path string) (fl fileLock, err error) { +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { pathp, err := syscall.UTF16PtrFromString(path) if err != nil { return } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + var access, shareMode uint32 + if readOnly { + access = syscall.GENERIC_READ + shareMode = syscall.FILE_SHARE_READ + } else { + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err == syscall.ERROR_FILE_NOT_FOUND { + fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + } if err != nil { return } @@ -47,9 +57,8 @@ func moveFileEx(from *uint16, to *uint16, flags uint32) error { if r1 == 0 { if e1 != 0 { return error(e1) - } else { - return syscall.EINVAL } + return syscall.EINVAL } return nil } diff --git a/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go new file mode 100644 index 0000000..9b70e15 --- /dev/null +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -0,0 +1,218 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "bytes" + "os" + "sync" +) + +const typeShift = 3 + +type memStorageLock struct { + ms *memStorage +} + +func (lock *memStorageLock) Release() { + ms := lock.ms + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock == lock { + ms.slock = nil + } + return +} + +// memStorage is a memory-backed storage. +type memStorage struct { + mu sync.Mutex + slock *memStorageLock + files map[uint64]*memFile + meta FileDesc +} + +// NewMemStorage returns a new memory-backed storage implementation. 
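NewMemStorage, whose definition follows, gives tests the same Storage interface without touching the file system or taking a file lock. A hedged usage sketch (the file number and payload are illustrative):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	stor := storage.NewMemStorage() // no directory, no LOCK file
	fd := storage.FileDesc{Type: storage.TypeTable, Num: 7}

	// Write a "table" into memory.
	w, _ := stor.Create(fd)
	w.Write([]byte("table bytes"))
	w.Close()

	// Read it back through the same interface.
	r, _ := stor.Open(fd)
	b, _ := ioutil.ReadAll(r)
	r.Close()
	fmt.Printf("%s %s\n", fd, b) // 000007.ldb table bytes
}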
+func NewMemStorage() Storage { + return &memStorage{ + files: make(map[uint64]*memFile), + } +} + +func (ms *memStorage) Lock() (Lock, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock != nil { + return nil, ErrLocked + } + ms.slock = &memStorageLock{ms: ms} + return ms.slock, nil +} + +func (*memStorage) Log(str string) {} + +func (ms *memStorage) SetMeta(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + + ms.mu.Lock() + ms.meta = fd + ms.mu.Unlock() + return nil +} + +func (ms *memStorage) GetMeta() (FileDesc, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.meta.Nil() { + return FileDesc{}, os.ErrNotExist + } + return ms.meta, nil +} + +func (ms *memStorage) List(ft FileType) ([]FileDesc, error) { + ms.mu.Lock() + var fds []FileDesc + for x := range ms.files { + fd := unpackFile(x) + if fd.Type&ft != 0 { + fds = append(fds, fd) + } + } + ms.mu.Unlock() + return fds, nil +} + +func (ms *memStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + ms.mu.Lock() + defer ms.mu.Unlock() + if m, exist := ms.files[packFile(fd)]; exist { + if m.open { + return nil, errFileOpen + } + m.open = true + return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil + } + return nil, os.ErrNotExist +} + +func (ms *memStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + x := packFile(fd) + ms.mu.Lock() + defer ms.mu.Unlock() + m, exist := ms.files[x] + if exist { + if m.open { + return nil, errFileOpen + } + m.Reset() + } else { + m = &memFile{} + ms.files[x] = m + } + m.open = true + return &memWriter{memFile: m, ms: ms}, nil +} + +func (ms *memStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + + x := packFile(fd) + ms.mu.Lock() + defer ms.mu.Unlock() + if _, exist := ms.files[x]; exist { + delete(ms.files, x) + return nil + } + return os.ErrNotExist +} + +func (ms *memStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { + return ErrInvalidFile + } + if oldfd == newfd { + return nil + } + + oldx := packFile(oldfd) + newx := packFile(newfd) + ms.mu.Lock() + defer ms.mu.Unlock() + oldm, exist := ms.files[oldx] + if !exist { + return os.ErrNotExist + } + newm, exist := ms.files[newx] + if (exist && newm.open) || oldm.open { + return errFileOpen + } + delete(ms.files, oldx) + ms.files[newx] = oldm + return nil +} + +func (*memStorage) Close() error { return nil } + +type memFile struct { + bytes.Buffer + open bool +} + +type memReader struct { + *bytes.Reader + ms *memStorage + m *memFile + closed bool +} + +func (mr *memReader) Close() error { + mr.ms.mu.Lock() + defer mr.ms.mu.Unlock() + if mr.closed { + return ErrClosed + } + mr.m.open = false + return nil +} + +type memWriter struct { + *memFile + ms *memStorage + closed bool +} + +func (*memWriter) Sync() error { return nil } + +func (mw *memWriter) Close() error { + mw.ms.mu.Lock() + defer mw.ms.mu.Unlock() + if mw.closed { + return ErrClosed + } + mw.memFile.open = false + return nil +} + +func packFile(fd FileDesc) uint64 { + return uint64(fd.Num)<<typeShift | uint64(fd.Type) +} + +func unpackFile(x uint64) FileDesc { + return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go similarity index 53% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go index
a4e037c..9b30b67 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go @@ -15,7 +15,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/util" ) -type FileType uint32 +type FileType int const ( TypeManifest FileType = 1 << iota @@ -50,13 +50,13 @@ var ( // a file. Package storage has its own type instead of using // errors.ErrCorrupted to prevent circular import. type ErrCorrupted struct { - File *FileInfo - Err error + Fd FileDesc + Err error } func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) + if !e.Fd.Nil() { + return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) } else { return e.Err.Error() } @@ -83,31 +83,47 @@ type Writer interface { Syncer } -// File is the file. A file instance must be goroutine-safe. -type File interface { - // Open opens the file for read. Returns os.ErrNotExist error - // if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open() (r Reader, err error) +type Lock interface { + util.Releaser +} - // Create creates the file for writting. Truncate the file if - // already exist. - // Returns ErrClosed if the underlying storage is closed. - Create() (w Writer, err error) +// FileDesc is a file descriptor. +type FileDesc struct { + Type FileType + Num int64 +} - // Replace replaces file with newfile. - // Returns ErrClosed if the underlying storage is closed. - Replace(newfile File) error +func (fd FileDesc) String() string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + return fmt.Sprintf("%#x-%d", fd.Type, fd.Num) + } +} - // Type returns the file type - Type() FileType +// Nil returns true if fd == (FileDesc{}). +func (fd FileDesc) Nil() bool { + return fd == (FileDesc{}) +} - // Num returns the file number. - Num() uint64 - - // Remove removes the file. - // Returns ErrClosed if the underlying storage is closed. - Remove() error +// FileDescOk returns true if fd is a valid file descriptor. +func FileDescOk(fd FileDesc) bool { + switch fd.Type { + case TypeManifest: + case TypeJournal: + case TypeTable: + case TypeTemp: + default: + return false + } + return fd.Num >= 0 } // Storage is the storage. A storage instance must be goroutine-safe. @@ -115,59 +131,47 @@ type Storage interface { // Lock locks the storage. Any subsequent attempt to call Lock will fail // until the last lock released. // After use the caller should call the Release method. - Lock() (l util.Releaser, err error) + Lock() (Lock, error) - // Log logs a string. This is used for logging. An implementation - // may write to a file, stdout or simply do nothing. + // Log logs a string. This is used for logging. + // An implementation may write to a file, stdout or simply do nothing. Log(str string) - // GetFile returns a file for the given number and type. GetFile will never - // returns nil, even if the underlying storage is closed. - GetFile(num uint64, t FileType) File + // SetMeta sets the storage to point to the given fd, which can then be + // retrieved using the GetMeta method. + // SetMeta should be implemented so that the change happens atomically. + SetMeta(fd FileDesc) error - // GetFiles returns a slice of files that match the given file types.
+	// GetMeta returns the fd pointed to by the meta, as set by SetMeta.
+	// Returns os.ErrNotExist if meta doesn't point to any fd, or points to
+	// a fd that doesn't exist.
+	GetMeta() (FileDesc, error)
+
+	// List returns fds that match the given file types.
 	// The file types may be OR'ed together.
-	GetFiles(t FileType) ([]File, error)
+	List(ft FileType) ([]FileDesc, error)
 
-	// GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
-	// file does not exist.
-	GetManifest() (File, error)
+	// Open opens file with the given fd read-only.
+	// Returns os.ErrNotExist error if the file does not exist.
+	// Returns ErrClosed if the underlying storage is closed.
+	Open(fd FileDesc) (Reader, error)
 
-	// SetManifest sets the given file as manifest file. The given file should
-	// be a manifest file type or error will be returned.
-	SetManifest(f File) error
+	// Create creates file with the given fd, truncating it if it already
+	// exists, and opens it write-only.
+	// Returns ErrClosed if the underlying storage is closed.
+	Create(fd FileDesc) (Writer, error)
 
-	// Close closes the storage. It is valid to call Close multiple times.
-	// Other methods should not be called after the storage has been closed.
+	// Remove removes file with the given fd.
+	// Returns ErrClosed if the underlying storage is closed.
+	Remove(fd FileDesc) error
+
+	// Rename renames file from oldfd to newfd.
+	// Returns ErrClosed if the underlying storage is closed.
+	Rename(oldfd, newfd FileDesc) error
+
+	// Close closes the storage.
+	// It is valid to call Close multiple times. Other methods should not be
+	// called after the storage has been closed.
 	Close() error
 }
-
-// FileInfo wraps basic file info.
-type FileInfo struct {
-	Type FileType
-	Num  uint64
-}
-
-func (fi FileInfo) String() string {
-	switch fi.Type {
-	case TypeManifest:
-		return fmt.Sprintf("MANIFEST-%06d", fi.Num)
-	case TypeJournal:
-		return fmt.Sprintf("%06d.log", fi.Num)
-	case TypeTable:
-		return fmt.Sprintf("%06d.ldb", fi.Num)
-	case TypeTemp:
-		return fmt.Sprintf("%06d.tmp", fi.Num)
-	default:
-		return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
-	}
-}
-
-// NewFileInfo creates new FileInfo from the given File. It will returns nil
-// if File is nil.
-func NewFileInfo(f File) *FileInfo {
-	if f == nil {
-		return nil
-	}
-	return &FileInfo{f.Type(), f.Num()}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/table.go
similarity index 84%
rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/table.go
index 37be47a..310ba6c 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -21,10 +21,10 @@ import (
 
 // tFile holds basic information about a table.
 type tFile struct {
-	file       storage.File
+	fd         storage.FileDesc
 	seekLeft   int32
-	size       uint64
-	imin, imax iKey
+	size       int64
+	imin, imax internalKey
 }
 
 // Returns true if given key is after largest key of this table.
@@ -48,9 +48,9 @@ func (t *tFile) consumeSeek() int32 {
 }
 
 // Creates new tFile.
-func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { +func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { f := &tFile{ - file: file, + fd: fd, size: size, imin: imin, imax: imax, @@ -77,6 +77,10 @@ func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { return f } +func tableFileFromRecord(r atRecord) *tFile { + return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax) +} + // tFiles hold multiple tFile. type tFiles []*tFile @@ -89,7 +93,7 @@ func (tf tFiles) nums() string { if i != 0 { x += ", " } - x += fmt.Sprint(f.file.Num()) + x += fmt.Sprint(f.fd.Num) } x += " ]" return x @@ -101,7 +105,7 @@ func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { a, b := tf[i], tf[j] n := icmp.Compare(a.imin, b.imin) if n == 0 { - return a.file.Num() < b.file.Num() + return a.fd.Num < b.fd.Num } return n < 0 } @@ -109,7 +113,7 @@ func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { // Returns true if i file number is greater than j. // This used for sort by file number in descending order. func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() + return tf[i].fd.Num > tf[j].fd.Num } // Sorts tables by key in ascending order. @@ -123,7 +127,7 @@ func (tf tFiles) sortByNum() { } // Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { +func (tf tFiles) size() (sum int64) { for _, t := range tf { sum += t.size } @@ -132,7 +136,7 @@ func (tf tFiles) size() (sum uint64) { // Searches smallest index of tables whose its smallest // key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { +func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imin, ikey) >= 0 }) @@ -140,7 +144,7 @@ func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { // Searches smallest index of tables whose its largest // key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { +func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imax, ikey) >= 0 }) @@ -162,7 +166,7 @@ func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) boo i := 0 if len(umin) > 0 { // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek)) + i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) } if i >= len(tf) { // Beginning of range is after all files, so no overlap. @@ -205,7 +209,7 @@ func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, ove } // Returns tables key range. 
-func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
 	for i, t := range tf {
 		if i == 0 {
 			imin, imax = t.imin, t.imax
@@ -227,10 +231,10 @@ func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range
 	if slice != nil {
 		var start, limit int
 		if slice.Start != nil {
-			start = tf.searchMax(icmp, iKey(slice.Start))
+			start = tf.searchMax(icmp, internalKey(slice.Start))
 		}
 		if slice.Limit != nil {
-			limit = tf.searchMin(icmp, iKey(slice.Limit))
+			limit = tf.searchMin(icmp, internalKey(slice.Limit))
 		} else {
 			limit = tf.Len()
 		}
@@ -255,7 +259,7 @@ type tFilesArrayIndexer struct {
 }
 
 func (a *tFilesArrayIndexer) Search(key []byte) int {
-	return a.searchMax(a.icmp, iKey(key))
+	return a.searchMax(a.icmp, internalKey(key))
 }
 
 func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@@ -295,16 +299,16 @@ type tOps struct {
 
 // Creates an empty table and returns table writer.
 func (t *tOps) create() (*tWriter, error) {
-	file := t.s.getTableFile(t.s.allocFileNum())
-	fw, err := file.Create()
+	fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
+	fw, err := t.s.stor.Create(fd)
 	if err != nil {
 		return nil, err
 	}
 	return &tWriter{
-		t:    t,
-		file: file,
-		w:    fw,
-		tw:   table.NewWriter(fw, t.s.o.Options),
+		t:  t,
+		fd: fd,
+		w:  fw,
+		tw: table.NewWriter(fw, t.s.o.Options),
 	}, nil
 }
@@ -340,21 +344,20 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
 
 // Opens table. It returns a cache handle, which should
 // be released after use.
 func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
-	num := f.file.Num()
-	ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
+	ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
 		var r storage.Reader
-		r, err = f.file.Open()
+		r, err = t.s.stor.Open(f.fd)
 		if err != nil {
 			return 0, nil
 		}
 
-		var bcache *cache.CacheGetter
+		var bcache *cache.NamespaceGetter
 		if t.bcache != nil {
-			bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
+			bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
 		}
 
 		var tr *table.Reader
-		tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
+		tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
 		if err != nil {
 			r.Close()
 			return 0, nil
@@ -390,14 +393,13 @@ func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte,
 }
 
 // Returns approximate offset of the given key.
-func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
 	ch, err := t.open(f)
 	if err != nil {
 		return
 	}
 	defer ch.Release()
-	offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
-	return uint64(offset_), err
+	return ch.Value().(*table.Reader).OffsetOf(key)
 }
 
 // Creates an iterator from the given table.
@@ -414,15 +416,14 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
 
 // Removes table from persistent storage. It waits until
 // no one uses the table.
 func (t *tOps) remove(f *tFile) {
-	num := f.file.Num()
-	t.cache.Delete(0, num, func() {
-		if err := f.file.Remove(); err != nil {
-			t.s.logf("table@remove removing @%d %q", num, err)
+	t.cache.Delete(0, uint64(f.fd.Num), func() {
+		if err := t.s.stor.Remove(f.fd); err != nil {
+			t.s.logf("table@remove removing @%d %q", f.fd.Num, err)
 		} else {
-			t.s.logf("table@remove removed @%d", num)
+			t.s.logf("table@remove removed @%d", f.fd.Num)
 		}
 		if t.bcache != nil {
-			t.bcache.EvictNS(num)
+			t.bcache.EvictNS(uint64(f.fd.Num))
 		}
 	})
 }
@@ -471,9 +472,9 @@ func newTableOps(s *session) *tOps {
 type tWriter struct {
 	t *tOps
 
-	file storage.File
-	w    storage.Writer
-	tw   *table.Writer
+	fd storage.FileDesc
+	w  storage.Writer
+	tw *table.Writer
 
 	first, last []byte
 }
@@ -513,16 +514,15 @@ func (w *tWriter) finish() (f *tFile, err error) {
 			return
 		}
 	}
-	f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+	f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
 	return
 }
 
 // Drops the table.
 func (w *tWriter) drop() {
 	w.close()
-	w.file.Remove()
-	w.t.s.reuseFileNum(w.file.Num())
-	w.file = nil
+	w.t.s.stor.Remove(w.fd)
+	w.t.s.reuseFileNum(w.fd.Num)
 	w.tw = nil
 	w.first = nil
 	w.last = nil
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
similarity index 98%
rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
index 23c7c61..ae61bec 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
+++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -507,9 +507,9 @@ func (i *indexIter) Get() iterator.Iterator {
 // Reader is a table reader.
 type Reader struct {
 	mu     sync.RWMutex
-	fi     *storage.FileInfo
+	fd     storage.FileDesc
 	reader io.ReaderAt
-	cache  *cache.CacheGetter
+	cache  *cache.NamespaceGetter
 	err    error
 	bpool  *util.BufferPool
 	// Options
@@ -539,7 +539,7 @@ func (r *Reader) blockKind(bh blockHandle) string {
 }
 
 func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
-	return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+	return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
 }
 
 func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
@@ -551,7 +551,7 @@ func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
 		cerr.Pos = int64(bh.offset)
 		cerr.Size = int64(bh.length)
 		cerr.Kind = r.blockKind(bh)
-		return &errors.ErrCorrupted{File: r.fi, Err: cerr}
+		return &errors.ErrCorrupted{Fd: r.fd, Err: cerr}
 	}
 	return err
 }
@@ -988,13 +988,13 @@ func (r *Reader) Release() {
 }
 
 // The fd, cache and bpool are optional and can be nil.
//
// The returned table reader instance is goroutine-safe.
-func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { +func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { if f == nil { return nil, errors.New("leveldb/table: nil file") } r := &Reader{ - fi: fi, + fd: fd, reader: f, cache: cache, bpool: bpool, diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util.go similarity index 83% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util.go index 1a5bf71..3b663d1 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util.go @@ -72,20 +72,20 @@ func maxInt(a, b int) int { return b } -type files []storage.File +type fdSorter []storage.FileDesc -func (p files) Len() int { +func (p fdSorter) Len() int { return len(p) } -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() +func (p fdSorter) Less(i, j int) bool { + return p[i].Num < p[j].Num } -func (p files) Swap(i, j int) { +func (p fdSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p files) sort() { - sort.Sort(p) +func sortFds(fds []storage.FileDesc) { + sort.Sort(fdSorter(fds)) } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go similarity index 60% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go index 5490366..7f3fa4e 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ 
b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go @@ -7,38 +7,38 @@ package util import ( - "bytes" "encoding/binary" ) // Hash return hash of the given data. func Hash(data []byte, seed uint32) uint32 { // Similar to murmur hash - var m uint32 = 0xc6a4a793 - var r uint32 = 24 - h := seed ^ (uint32(len(data)) * m) + const ( + m = uint32(0xc6a4a793) + r = uint32(24) + ) + var ( + h = seed ^ (uint32(len(data)) * m) + i int + ) - buf := bytes.NewBuffer(data) - for buf.Len() >= 4 { - var w uint32 - binary.Read(buf, binary.LittleEndian, &w) - h += w + for n := len(data) - len(data)%4; i < n; i += 4 { + h += binary.LittleEndian.Uint32(data[i:]) h *= m h ^= (h >> 16) } - rest := buf.Bytes() - switch len(rest) { + switch len(data) - i { default: panic("not reached") case 3: - h += uint32(rest[2]) << 16 + h += uint32(data[i+2]) << 16 fallthrough case 2: - h += uint32(rest[1]) << 8 + h += uint32(data[i+1]) << 8 fallthrough case 1: - h += uint32(rest[0]) + h += uint32(data[i]) h *= m h ^= (h >> r) case 0: diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go similarity index 100% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/version.go similarity index 52% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go rename to cmd/vendor/github.com/syndtr/goleveldb/leveldb/version.go index 011d982..d274eef 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go +++ b/cmd/vendor/github.com/syndtr/goleveldb/leveldb/version.go @@ -7,6 +7,7 @@ package leveldb import ( + "fmt" "sync/atomic" "unsafe" @@ -23,7 +24,7 @@ type tSet struct { type version struct { s *session - tables []tFiles + levels []tFiles // Level that should be compacted next and its compaction score. // Score < 1 means compaction is not strictly needed. 
These fields
@@ -39,7 +40,7 @@
 }
 
 func newVersion(s *session) *version {
-	return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())}
+	return &version{s: s}
 }
 
 func (v *version) releaseNB() {
@@ -51,18 +52,18 @@
 		panic("negative version ref")
 	}
 
-	tables := make(map[uint64]bool)
-	for _, tt := range v.next.tables {
+	nextTables := make(map[int64]bool)
+	for _, tt := range v.next.levels {
 		for _, t := range tt {
-			num := t.file.Num()
-			tables[num] = true
+			num := t.fd.Num
+			nextTables[num] = true
 		}
 	}
 
-	for _, tt := range v.tables {
+	for _, tt := range v.levels {
 		for _, t := range tt {
-			num := t.file.Num()
-			if _, ok := tables[num]; !ok {
+			num := t.fd.Num
+			if _, ok := nextTables[num]; !ok {
 				v.s.tops.remove(t)
 			}
 		}
@@ -78,11 +79,26 @@ func (v *version) release() {
 	v.s.vmu.Unlock()
 }
 
-func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
 	ukey := ikey.ukey()
 
+	// Aux level.
+	if aux != nil {
+		for _, t := range aux {
+			if t.overlaps(v.s.icmp, ukey, ukey) {
+				if !f(-1, t) {
+					return
+				}
+			}
+		}
+
+		if lf != nil && !lf(-1) {
+			return
+		}
+	}
+
 	// Walk tables level-by-level.
-	for level, tables := range v.tables {
+	for level, tables := range v.levels {
 		if len(tables) == 0 {
 			continue
 		}
@@ -114,7 +130,7 @@
 	}
 }
 
-func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
 	ukey := ikey.ukey()
 
 	var (
@@ -124,16 +140,16 @@
 		// Level-0.
 		zfound bool
 		zseq   uint64
-		zkt    kType
+		zkt    keyType
 		zval   []byte
 	)
 
 	err = ErrNotFound
 
-	// Since entries never hope across level, finding key/value
+	// Since entries never hop across level, finding key/value
 	// in a smaller level makes later levels irrelevant.
-	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
-		if !tseek {
+	v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool {
+		if level >= 0 && !tseek {
 			if tset == nil {
 				tset = &tSet{level, t}
 			} else {
@@ -150,6 +166,7 @@
 		} else {
 			fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
 		}
+
 		switch ferr {
 		case nil:
 		case ErrNotFound:
@@ -159,9 +176,10 @@
 			return false
 		}
 
-		if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+		if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
 			if v.s.icmp.uCompare(ukey, fukey) == 0 {
-				if level == 0 {
+				// Levels <= 0 may overlap each other.
+ if level <= 0 { if fseq >= zseq { zfound = true zseq = fseq @@ -170,12 +188,12 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt } } else { switch fkt { - case ktVal: + case keyTypeVal: value = fval err = nil - case ktDel: + case keyTypeDel: default: - panic("leveldb: invalid iKey type") + panic("leveldb: invalid internalKey type") } return false } @@ -189,12 +207,12 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt }, func(level int) bool { if zfound { switch zkt { - case ktVal: + case keyTypeVal: value = zval err = nil - case ktDel: + case keyTypeDel: default: - panic("leveldb: invalid iKey type") + panic("leveldb: invalid internalKey type") } return false } @@ -209,46 +227,40 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt return } -func (v *version) sampleSeek(ikey iKey) (tcomp bool) { +func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { var tset *tSet - v.walkOverlapping(ikey, func(level int, t *tFile) bool { + v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { if tset == nil { tset = &tSet{level, t} return true - } else { - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false } + if tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + return false }, nil) return } func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue + for level, tables := range v.levels { + if level == 0 { + // Merge all level zero files together since they may overlap. + for _, t := range tables { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + } else if len(tables) != 0 { + its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict) - its = append(its, it) } - return } func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())} + return &versionStaging{base: v} } // Spawn a new version based on this version. @@ -259,19 +271,22 @@ func (v *version) spawn(r *sessionRecord) *version { } func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { + for level, tables := range v.levels { + for _, t := range tables { r.addTableFile(level, t) } } } func (v *version) tLen(level int) int { - return len(v.tables[level]) + if level < len(v.levels) { + return len(v.levels[level]) + } + return 0 } -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { +func (v *version) offsetOf(ikey internalKey) (n int64, err error) { + for level, tables := range v.levels { for _, t := range tables { if v.s.icmp.Compare(t.imax, ikey) <= 0 { // Entire file is before "ikey", so just add the file size @@ -287,12 +302,11 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) { } else { // "ikey" falls in the range for this table. Add the // approximate offset of "ikey" within the table. 
- var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { + if m, err := v.s.tops.offsetOf(t, ikey); err == nil { + n += m + } else { return 0, err } - n += nn } } } @@ -300,37 +314,50 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) { return } -func (v *version) pickMemdbLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - maxLevel := v.s.o.GetMaxMemCompationLevel() - for ; level < maxLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) { - break +func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { + if maxLevel > 0 { + if len(v.levels) == 0 { + return maxLevel + } + if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < maxLevel; level++ { + if pLevel := level + 1; pLevel >= len(v.levels) { + return maxLevel + } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { + break + } + if gpLevel := level + 2; gpLevel < len(v.levels) { + overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { + break + } + } } } } - return } func (v *version) computeCompaction() { // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 + bestLevel := int(-1) + bestScore := float64(-1) - for level, tables := range v.tables { + statFiles := make([]int, len(v.levels)) + statSizes := make([]string, len(v.levels)) + statScore := make([]string, len(v.levels)) + statTotSize := int64(0) + + for level, tables := range v.levels { var score float64 + size := tables.size() if level == 0 { // We treat level-0 specially by bounding the number of files // instead of number of bytes for two reasons: // // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compactions. + // many level-0 compaction. // // (2) The files in level-0 are merged on every read and // therefore we wish to avoid too many files when the individual @@ -339,17 +366,24 @@ func (v *version) computeCompaction() { // overwrites/deletions). 
score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) } else { - score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level)) + score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) } if score > bestScore { bestLevel = level bestScore = score } + + statFiles[level] = len(tables) + statSizes[level] = shortenb(int(size)) + statScore[level] = fmt.Sprintf("%.2f", score) + statTotSize += size } v.cLevel = bestLevel v.cScore = bestScore + + v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) } func (v *version) needCompaction() bool { @@ -357,43 +391,48 @@ func (v *version) needCompaction() bool { } type tablesScratch struct { - added map[uint64]atRecord - deleted map[uint64]struct{} + added map[int64]atRecord + deleted map[int64]struct{} } type versionStaging struct { base *version - tables []tablesScratch + levels []tablesScratch +} + +func (p *versionStaging) getScratch(level int) *tablesScratch { + if level >= len(p.levels) { + newLevels := make([]tablesScratch, level+1) + copy(newLevels, p.levels) + p.levels = newLevels + } + return &(p.levels[level]) } func (p *versionStaging) commit(r *sessionRecord) { // Deleted tables. for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) + scratch := p.getScratch(r.level) + if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { + if scratch.deleted == nil { + scratch.deleted = make(map[int64]struct{}) } - tm.deleted[r.num] = struct{}{} + scratch.deleted[r.num] = struct{}{} } - - if tm.added != nil { - delete(tm.added, r.num) + if scratch.added != nil { + delete(scratch.added, r.num) } } // New tables. for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]atRecord) + scratch := p.getScratch(r.level) + if scratch.added == nil { + scratch.added = make(map[int64]atRecord) } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) + scratch.added[r.num] = r + if scratch.deleted != nil { + delete(scratch.deleted, r.num) } } } @@ -401,39 +440,62 @@ func (p *versionStaging) commit(r *sessionRecord) { func (p *versionStaging) finish() *version { // Build new version. nv := newVersion(p.base.s) - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue - } - if _, ok := tm.added[t.file.Num()]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range tm.added { - nt = append(nt, p.base.s.tableFileFromRecord(r)) - } - - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - nv.tables[level] = nt + numLevel := len(p.levels) + if len(p.base.levels) > numLevel { + numLevel = len(p.base.levels) } + nv.levels = make([]tFiles, numLevel) + for level := 0; level < numLevel; level++ { + var baseTabels tFiles + if level < len(p.base.levels) { + baseTabels = p.base.levels[level] + } + + if level < len(p.levels) { + scratch := p.levels[level] + + var nt tFiles + // Prealloc list if possible. + if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { + nt = make(tFiles, 0, n) + } + + // Base tables. 
+ for _, t := range baseTabels { + if _, ok := scratch.deleted[t.fd.Num]; ok { + continue + } + if _, ok := scratch.added[t.fd.Num]; ok { + continue + } + nt = append(nt, t) + } + + // New tables. + for _, r := range scratch.added { + nt = append(nt, tableFileFromRecord(r)) + } + + if len(nt) != 0 { + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } + + nv.levels[level] = nt + } + } else { + nv.levels[level] = baseTabels + } + } + + // Trim levels. + n := len(nv.levels) + for ; n > 0 && nv.levels[n-1] == nil; n-- { + } + nv.levels = nv.levels[:n] // Compute compaction score for new version. nv.computeCompaction() diff --git a/cmd/vendor/github.com/ugorji/go/LICENSE b/cmd/vendor/github.com/ugorji/go/LICENSE new file mode 100644 index 0000000..95a0f05 --- /dev/null +++ b/cmd/vendor/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
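The storage rework running through the vendored goleveldb files above replaces the old File/FileInfo handles with plain FileDesc values ({Type, Num}) and moves all file I/O onto the Storage interface (Open, Create, Remove, Rename). As a rough caller-side sketch only, not part of this patch, the new API can be exercised against the in-memory storage from mem_storage.go like this:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// NewMemStorage, FileDesc and the Open/Create methods are the APIs
	// introduced by the vendored storage files in this patch.
	stor := storage.NewMemStorage()
	defer stor.Close()

	// A FileDesc names a file by type and number; its String method
	// renders the conventional name, e.g. "000001.ldb" for a table.
	fd := storage.FileDesc{Type: storage.TypeTable, Num: 1}

	w, err := stor.Create(fd) // truncates if it exists, opens write-only
	if err != nil {
		panic(err)
	}
	w.Write([]byte("table data"))
	w.Close()

	r, err := stor.Open(fd) // read-only; os.ErrNotExist if absent
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(r)
	r.Close()

	fmt.Printf("%v holds %d bytes\n", fd, len(b)) // 000001.ldb holds 10 bytes
}

Passing FileDesc values around by value is what lets the session and table code above sort, pack and compare file descriptors directly instead of holding live File objects.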
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go b/cmd/vendor/github.com/ugorji/go/codec/0doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go rename to cmd/vendor/github.com/ugorji/go/codec/0doc.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md b/cmd/vendor/github.com/ugorji/go/codec/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/README.md rename to cmd/vendor/github.com/ugorji/go/codec/README.md diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go b/cmd/vendor/github.com/ugorji/go/codec/binc.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go rename to cmd/vendor/github.com/ugorji/go/codec/binc.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go b/cmd/vendor/github.com/ugorji/go/codec/cbor.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go rename to cmd/vendor/github.com/ugorji/go/codec/cbor.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go b/cmd/vendor/github.com/ugorji/go/codec/decode.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go rename to cmd/vendor/github.com/ugorji/go/codec/decode.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go b/cmd/vendor/github.com/ugorji/go/codec/encode.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go rename to cmd/vendor/github.com/ugorji/go/codec/encode.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go b/cmd/vendor/github.com/ugorji/go/codec/fast-path.generated.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go rename to cmd/vendor/github.com/ugorji/go/codec/fast-path.generated.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl b/cmd/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl rename to cmd/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/cmd/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl rename to cmd/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/cmd/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl rename to cmd/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go b/cmd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go rename to cmd/vendor/github.com/ugorji/go/codec/gen-helper.generated.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl b/cmd/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl rename to cmd/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl diff --git 
a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go b/cmd/vendor/github.com/ugorji/go/codec/gen.generated.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go rename to cmd/vendor/github.com/ugorji/go/codec/gen.generated.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go b/cmd/vendor/github.com/ugorji/go/codec/gen.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go rename to cmd/vendor/github.com/ugorji/go/codec/gen.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go b/cmd/vendor/github.com/ugorji/go/codec/helper.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go rename to cmd/vendor/github.com/ugorji/go/codec/helper.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go b/cmd/vendor/github.com/ugorji/go/codec/helper_internal.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go rename to cmd/vendor/github.com/ugorji/go/codec/helper_internal.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go b/cmd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go rename to cmd/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go b/cmd/vendor/github.com/ugorji/go/codec/helper_unsafe.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go rename to cmd/vendor/github.com/ugorji/go/codec/helper_unsafe.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go b/cmd/vendor/github.com/ugorji/go/codec/json.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/json.go rename to cmd/vendor/github.com/ugorji/go/codec/json.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go b/cmd/vendor/github.com/ugorji/go/codec/msgpack.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go rename to cmd/vendor/github.com/ugorji/go/codec/msgpack.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go b/cmd/vendor/github.com/ugorji/go/codec/noop.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go rename to cmd/vendor/github.com/ugorji/go/codec/noop.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go b/cmd/vendor/github.com/ugorji/go/codec/prebuild.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go rename to cmd/vendor/github.com/ugorji/go/codec/prebuild.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh b/cmd/vendor/github.com/ugorji/go/codec/prebuild.sh similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh rename to cmd/vendor/github.com/ugorji/go/codec/prebuild.sh diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go b/cmd/vendor/github.com/ugorji/go/codec/rpc.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go rename to cmd/vendor/github.com/ugorji/go/codec/rpc.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/simple.go b/cmd/vendor/github.com/ugorji/go/codec/simple.go similarity index 100% rename from 
Godeps/_workspace/src/github.com/ugorji/go/codec/simple.go rename to cmd/vendor/github.com/ugorji/go/codec/simple.go diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json b/cmd/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json rename to cmd/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py b/cmd/vendor/github.com/ugorji/go/codec/test.py similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/test.py rename to cmd/vendor/github.com/ugorji/go/codec/test.py diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go b/cmd/vendor/github.com/ugorji/go/codec/time.go similarity index 100% rename from Godeps/_workspace/src/github.com/ugorji/go/codec/time.go rename to cmd/vendor/github.com/ugorji/go/codec/time.go diff --git a/config/config.toml b/config/config.toml index 03c1daa..d4ea545 100644 --- a/config/config.toml +++ b/config/config.toml @@ -34,8 +34,6 @@ readonly = false # leveldb # rocksdb # goleveldb -# lmdb -# boltdb # memory # db_name = "leveldb" diff --git a/etc/ledis.conf b/etc/ledis.conf index 7ebd83c..778319f 100644 --- a/etc/ledis.conf +++ b/etc/ledis.conf @@ -33,8 +33,6 @@ readonly = false # leveldb # rocksdb # goleveldb -# lmdb -# boltdb # memory # db_name = "leveldb" diff --git a/server/client_http.go b/server/client_http.go index 0496abe..2b429a3 100644 --- a/server/client_http.go +++ b/server/client_http.go @@ -16,18 +16,18 @@ import ( ) var allowedContentTypes = map[string]struct{}{ - "json": struct{}{}, - "bson": struct{}{}, - "msgpack": struct{}{}, + "json": {}, + "bson": {}, + "msgpack": {}, } var httpUnsupportedCommands = map[string]struct{}{ - "slaveof": struct{}{}, - "fullsync": struct{}{}, - "sync": struct{}{}, - "quit": struct{}{}, - "begin": struct{}{}, - "commit": struct{}{}, - "rollback": struct{}{}, + "slaveof": {}, + "fullsync": {}, + "sync": {}, + "quit": {}, + "begin": {}, + "commit": {}, + "rollback": {}, } type httpClient struct { diff --git a/server/cmd_script.go b/server/cmd_script.go index e299aa7..897028d 100644 --- a/server/cmd_script.go +++ b/server/cmd_script.go @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/siddontang/ledisdb/vendor/lua" + lua "github.com/siddontang/golua" ) func parseEvalArgs(l *lua.State, c *client) error { @@ -202,7 +202,7 @@ func scriptFlushCommand(c *client) error { return ErrCmdParams } - for n, _ := range s.chunks { + for n := range s.chunks { l.PushNil() l.SetGlobal(n) } diff --git a/server/cmd_sort_test.go b/server/cmd_sort_test.go index 9ca5ebc..894c812 100644 --- a/server/cmd_sort_test.go +++ b/server/cmd_sort_test.go @@ -17,7 +17,7 @@ func checkTestSortRes(ay interface{}, checks []string) error { return fmt.Errorf("invalid res number %d != %d", len(values), len(checks)) } - for i, _ := range values { + for i := range values { if string(values[i].([]byte)) != checks[i] { return fmt.Errorf("invalid res at %d, %s != %s", i, values[i], checks[i]) } diff --git a/server/script.go b/server/script.go index 8fd22ec..ecd884a 100644 --- a/server/script.go +++ b/server/script.go @@ -10,8 +10,8 @@ import ( "github.com/siddontang/go/hack" "github.com/siddontang/go/num" + lua "github.com/siddontang/golua" "github.com/siddontang/ledisdb/ledis" - "github.com/siddontang/ledisdb/vendor/lua" ) //ledis <-> lua type conversion, same as http://redis.io/commands/eval @@ -54,7 
+54,7 @@ func (w *luaWriter) writeArray(lst []interface{}) { w.l.CreateTable(len(lst), 0) top := w.l.GetTop() - for i, _ := range lst { + for i := range lst { w.l.PushInteger(int64(i) + 1) switch v := lst[i].(type) { diff --git a/server/script_test.go b/server/script_test.go index bae27ff..ddd7b2a 100644 --- a/server/script_test.go +++ b/server/script_test.go @@ -5,8 +5,8 @@ package server import ( "fmt" + lua "github.com/siddontang/golua" "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/vendor/lua" "testing" ) diff --git a/store/boltdb/const.go b/store/boltdb/const.go deleted file mode 100644 index 1e7d0ae..0000000 --- a/store/boltdb/const.go +++ /dev/null @@ -1,3 +0,0 @@ -package boltdb - -const DBName = "boltdb" diff --git a/store/boltdb/db.go b/store/boltdb/db.go deleted file mode 100644 index db4cb81..0000000 --- a/store/boltdb/db.go +++ /dev/null @@ -1,173 +0,0 @@ -package boltdb - -import ( - "os" - "path" - - "github.com/boltdb/bolt" - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" -) - -var bucketName = []byte("ledisdb") - -type Store struct { -} - -func (s Store) String() string { - return DBName -} - -func (s Store) Open(dbPath string, cfg *config.Config) (driver.IDB, error) { - os.MkdirAll(dbPath, 0755) - name := path.Join(dbPath, "ledis_bolt.db") - db := new(DB) - var err error - - db.path = name - db.cfg = cfg - - db.db, err = bolt.Open(name, 0600, nil) - if err != nil { - return nil, err - } - - var tx *bolt.Tx - tx, err = db.db.Begin(true) - if err != nil { - return nil, err - } - - _, err = tx.CreateBucketIfNotExists(bucketName) - if err != nil { - tx.Rollback() - return nil, err - } - - if err = tx.Commit(); err != nil { - return nil, err - } - - return db, nil -} - -func (s Store) Repair(path string, cfg *config.Config) error { - return nil -} - -type DB struct { - cfg *config.Config - db *bolt.DB - path string -} - -func (db *DB) Close() error { - return db.db.Close() -} - -func (db *DB) Get(key []byte) ([]byte, error) { - var value []byte - - t, err := db.db.Begin(false) - if err != nil { - return nil, err - } - defer t.Rollback() - - b := t.Bucket(bucketName) - - value = b.Get(key) - - if value == nil { - return nil, nil - } else { - return append([]byte{}, value...), nil - } -} - -func (db *DB) Put(key []byte, value []byte) error { - err := db.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(bucketName) - return b.Put(key, value) - }) - return err -} - -func (db *DB) Delete(key []byte) error { - err := db.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(bucketName) - return b.Delete(key) - }) - return err -} - -func (db *DB) SyncPut(key []byte, value []byte) error { - return db.Put(key, value) -} - -func (db *DB) SyncDelete(key []byte) error { - return db.Delete(key) -} - -func (db *DB) NewIterator() driver.IIterator { - tx, err := db.db.Begin(false) - if err != nil { - return &Iterator{} - } - b := tx.Bucket(bucketName) - - return &Iterator{ - tx: tx, - it: b.Cursor()} -} - -func (db *DB) NewWriteBatch() driver.IWriteBatch { - return driver.NewWriteBatch(db) -} - -func (db *DB) Begin() (driver.Tx, error) { - tx, err := db.db.Begin(true) - if err != nil { - return nil, err - } - - return &Tx{ - tx: tx, - b: tx.Bucket(bucketName), - }, nil -} - -func (db *DB) NewSnapshot() (driver.ISnapshot, error) { - return newSnapshot(db) -} - -func (db *DB) BatchPut(writes []driver.Write) error { - err := db.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(bucketName) - var err error - for _, w := range 
writes { - if w.Value == nil { - err = b.Delete(w.Key) - } else { - err = b.Put(w.Key, w.Value) - } - if err != nil { - return err - } - } - return nil - }) - return err -} - -func (db *DB) SyncBatchPut(writes []driver.Write) error { - return db.BatchPut(writes) -} - -func (db *DB) Compact() error { - return nil -} - -func init() { - //driver.Register(Store{}) -} diff --git a/store/boltdb/iterator.go b/store/boltdb/iterator.go deleted file mode 100644 index aea7508..0000000 --- a/store/boltdb/iterator.go +++ /dev/null @@ -1,50 +0,0 @@ -package boltdb - -import ( - "github.com/boltdb/bolt" -) - -type Iterator struct { - tx *bolt.Tx - it *bolt.Cursor - key []byte - value []byte -} - -func (it *Iterator) Close() error { - if it.tx != nil { - return it.tx.Rollback() - } else { - return nil - } -} - -func (it *Iterator) First() { - it.key, it.value = it.it.First() -} - -func (it *Iterator) Last() { - it.key, it.value = it.it.Last() -} - -func (it *Iterator) Seek(key []byte) { - it.key, it.value = it.it.Seek(key) -} - -func (it *Iterator) Next() { - it.key, it.value = it.it.Next() -} -func (it *Iterator) Prev() { - it.key, it.value = it.it.Prev() -} - -func (it *Iterator) Valid() bool { - return !(it.key == nil && it.value == nil) -} - -func (it *Iterator) Key() []byte { - return it.key -} -func (it *Iterator) Value() []byte { - return it.value -} diff --git a/store/boltdb/snapshot.go b/store/boltdb/snapshot.go deleted file mode 100644 index ad4df90..0000000 --- a/store/boltdb/snapshot.go +++ /dev/null @@ -1,37 +0,0 @@ -package boltdb - -import ( - "github.com/boltdb/bolt" - "github.com/siddontang/ledisdb/store/driver" -) - -type Snapshot struct { - tx *bolt.Tx - b *bolt.Bucket -} - -func newSnapshot(db *DB) (*Snapshot, error) { - tx, err := db.db.Begin(false) - if err != nil { - return nil, err - } - - return &Snapshot{ - tx: tx, - b: tx.Bucket(bucketName)}, nil -} - -func (s *Snapshot) Get(key []byte) ([]byte, error) { - return s.b.Get(key), nil -} - -func (s *Snapshot) NewIterator() driver.IIterator { - return &Iterator{ - tx: nil, - it: s.b.Cursor(), - } -} - -func (s *Snapshot) Close() { - s.tx.Rollback() -} diff --git a/store/boltdb/tx.go b/store/boltdb/tx.go deleted file mode 100644 index 8dba000..0000000 --- a/store/boltdb/tx.go +++ /dev/null @@ -1,61 +0,0 @@ -package boltdb - -import ( - "github.com/boltdb/bolt" - "github.com/siddontang/ledisdb/store/driver" -) - -type Tx struct { - tx *bolt.Tx - b *bolt.Bucket -} - -func (t *Tx) Get(key []byte) ([]byte, error) { - return t.b.Get(key), nil -} - -func (t *Tx) Put(key []byte, value []byte) error { - return t.b.Put(key, value) -} - -func (t *Tx) Delete(key []byte) error { - return t.b.Delete(key) -} - -func (t *Tx) NewIterator() driver.IIterator { - return &Iterator{ - tx: nil, - it: t.b.Cursor(), - } -} - -func (t *Tx) NewWriteBatch() driver.IWriteBatch { - return driver.NewWriteBatch(t) -} - -func (t *Tx) BatchPut(writes []driver.Write) error { - var err error - for _, w := range writes { - if w.Value == nil { - err = t.b.Delete(w.Key) - } else { - err = t.b.Put(w.Key, w.Value) - } - if err != nil { - return err - } - } - return nil -} - -func (t *Tx) SyncBatchPut(writes []driver.Write) error { - return t.BatchPut(writes) -} - -func (t *Tx) Rollback() error { - return t.tx.Rollback() -} - -func (t *Tx) Commit() error { - return t.tx.Commit() -} diff --git a/store/driver/store.go b/store/driver/store.go index 2116696..fbaebfc 100644 --- a/store/driver/store.go +++ b/store/driver/store.go @@ -25,7 +25,7 @@ func Register(s Store) { func 
ListStores() []string { s := []string{} - for k, _ := range dbs { + for k := range dbs { s = append(s, k) } diff --git a/store/mdb/const.go b/store/mdb/const.go deleted file mode 100644 index cdc70e0..0000000 --- a/store/mdb/const.go +++ /dev/null @@ -1,3 +0,0 @@ -package mdb - -const DBName = "lmdb" diff --git a/store/mdb/influxdb_license b/store/mdb/influxdb_license deleted file mode 100644 index 03f21e8..0000000 --- a/store/mdb/influxdb_license +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2014 Errplane Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/store/mdb/mdb.go b/store/mdb/mdb.go deleted file mode 100644 index 262bd20..0000000 --- a/store/mdb/mdb.go +++ /dev/null @@ -1,317 +0,0 @@ -// +build lmdb - -package mdb - -import ( - "os" - - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" - mdb "github.com/siddontang/ledisdb/vendor/gomdb" -) - -type Store struct { -} - -func (s Store) String() string { - return DBName -} - -type MDB struct { - env *mdb.Env - db mdb.DBI - path string - cfg *config.Config -} - -func (s Store) Open(path string, c *config.Config) (driver.IDB, error) { - mapSize := c.LMDB.MapSize - noSync := c.LMDB.NoSync - - if mapSize <= 0 { - mapSize = 500 * 1024 * 1024 - } - - env, err := mdb.NewEnv() - if err != nil { - return MDB{}, err - } - - // TODO: max dbs should be configurable - if err := env.SetMaxDBs(1); err != nil { - return MDB{}, err - } - - if err := env.SetMapSize(uint64(mapSize)); err != nil { - return MDB{}, err - } - - if _, err := os.Stat(path); err != nil { - err = os.MkdirAll(path, 0755) - if err != nil { - return MDB{}, err - } - } - - var flags uint = mdb.CREATE - if noSync { - flags |= mdb.NOSYNC | mdb.NOMETASYNC | mdb.WRITEMAP | mdb.MAPASYNC - } - - err = env.Open(path, flags, 0755) - if err != nil { - return MDB{}, err - } - - tx, err := env.BeginTxn(nil, 0) - if err != nil { - return MDB{}, err - } - - dbi, err := tx.DBIOpen(nil, mdb.CREATE) - if err != nil { - return MDB{}, err - } - - if err := tx.Commit(); err != nil { - return MDB{}, err - } - - db := MDB{ - env: env, - db: dbi, - path: path, - } - - return db, nil -} - -func (s Store) Repair(path string, c *config.Config) error { - println("llmd not supports repair") - return nil -} - -func (db MDB) Put(key, value []byte) error { - itr := db.iterator(false) - defer itr.Close() - - itr.err = itr.c.Put(key, value, 0) - itr.setState() - return itr.err -} - -func (db MDB) BatchPut(writes []driver.Write) error { - itr := 
db.iterator(false) - defer itr.Close() - - for _, w := range writes { - if w.Value == nil { - itr.key, itr.value, itr.err = itr.c.Get(w.Key, nil, mdb.SET) - if itr.err == nil { - itr.err = itr.c.Del(0) - } - } else { - itr.err = itr.c.Put(w.Key, w.Value, 0) - } - - if itr.err != nil && itr.err != mdb.NotFound { - break - } - } - itr.setState() - - return itr.err -} - -func (db MDB) SyncBatchPut(writes []driver.Write) error { - if err := db.BatchPut(writes); err != nil { - return err - } - - return db.env.Sync(1) -} - -func (db MDB) Get(key []byte) ([]byte, error) { - tx, err := db.env.BeginTxn(nil, mdb.RDONLY) - if err != nil { - return nil, err - } - defer tx.Commit() - - v, err := tx.Get(db.db, key) - if err == mdb.NotFound { - return nil, nil - } - return v, err -} - -func (db MDB) Delete(key []byte) error { - itr := db.iterator(false) - defer itr.Close() - - itr.key, itr.value, itr.err = itr.c.Get(key, nil, mdb.SET) - if itr.err == nil { - itr.err = itr.c.Del(0) - } - itr.setState() - return itr.Error() -} - -func (db MDB) SyncPut(key []byte, value []byte) error { - if err := db.Put(key, value); err != nil { - return err - } - - return db.env.Sync(1) -} - -func (db MDB) SyncDelete(key []byte) error { - if err := db.Delete(key); err != nil { - return err - } - - return db.env.Sync(1) -} - -type MDBIterator struct { - key []byte - value []byte - c *mdb.Cursor - tx *mdb.Txn - valid bool - err error - - closeAutoCommit bool -} - -func (itr *MDBIterator) Key() []byte { - return itr.key -} - -func (itr *MDBIterator) Value() []byte { - return itr.value -} - -func (itr *MDBIterator) Valid() bool { - return itr.valid -} - -func (itr *MDBIterator) Error() error { - return itr.err -} - -func (itr *MDBIterator) getCurrent() { - itr.key, itr.value, itr.err = itr.c.Get(nil, nil, mdb.GET_CURRENT) - itr.setState() -} - -func (itr *MDBIterator) Seek(key []byte) { - itr.key, itr.value, itr.err = itr.c.Get(key, nil, mdb.SET_RANGE) - itr.setState() -} -func (itr *MDBIterator) Next() { - itr.key, itr.value, itr.err = itr.c.Get(nil, nil, mdb.NEXT) - itr.setState() -} - -func (itr *MDBIterator) Prev() { - itr.key, itr.value, itr.err = itr.c.Get(nil, nil, mdb.PREV) - itr.setState() -} - -func (itr *MDBIterator) First() { - itr.key, itr.value, itr.err = itr.c.Get(nil, nil, mdb.FIRST) - itr.setState() -} - -func (itr *MDBIterator) Last() { - itr.key, itr.value, itr.err = itr.c.Get(nil, nil, mdb.LAST) - itr.setState() -} - -func (itr *MDBIterator) setState() { - if itr.err != nil { - if itr.err == mdb.NotFound { - itr.err = nil - } - itr.valid = false - } else { - itr.valid = true - } -} - -func (itr *MDBIterator) Close() error { - if err := itr.c.Close(); err != nil { - itr.tx.Abort() - return err - } - - if !itr.closeAutoCommit { - return itr.err - } - - if itr.err != nil { - itr.tx.Abort() - return itr.err - } - return itr.tx.Commit() -} - -func (_ MDB) Name() string { - return "lmdb" -} - -func (db MDB) Path() string { - return db.path -} - -func (db MDB) iterator(rdonly bool) *MDBIterator { - flags := uint(0) - if rdonly { - flags = mdb.RDONLY - } - tx, err := db.env.BeginTxn(nil, flags) - if err != nil { - return &MDBIterator{nil, nil, nil, nil, false, err, true} - } - - c, err := tx.CursorOpen(db.db) - if err != nil { - tx.Abort() - return &MDBIterator{nil, nil, nil, nil, false, err, true} - } - - return &MDBIterator{nil, nil, c, tx, true, nil, true} -} - -func (db MDB) Close() error { - db.env.DBIClose(db.db) - if err := db.env.Close(); err != nil { - panic(err) - } - return nil -} - -func (db MDB) 
NewIterator() driver.IIterator { - return db.iterator(true) -} - -func (db MDB) NewWriteBatch() driver.IWriteBatch { - return driver.NewWriteBatch(db) -} - -func (db MDB) Begin() (driver.Tx, error) { - return newTx(db) -} - -func (db MDB) NewSnapshot() (driver.ISnapshot, error) { - return newSnapshot(db) -} - -func (db MDB) Compact() error { - return nil -} - -func init() { - driver.Register(Store{}) -} diff --git a/store/mdb/snapshot.go b/store/mdb/snapshot.go deleted file mode 100644 index 0e26ba9..0000000 --- a/store/mdb/snapshot.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build lmdb - -package mdb - -import ( - "github.com/siddontang/ledisdb/store/driver" - mdb "github.com/siddontang/ledisdb/vendor/gomdb" -) - -type Snapshot struct { - db mdb.DBI - tx *mdb.Txn -} - -func newSnapshot(db MDB) (*Snapshot, error) { - tx, err := db.env.BeginTxn(nil, mdb.RDONLY) - if err != nil { - return nil, err - } - - return &Snapshot{db.db, tx}, nil -} - -func (s *Snapshot) Get(key []byte) ([]byte, error) { - v, err := s.tx.Get(s.db, key) - if err == mdb.NotFound { - return nil, nil - } - return v, err -} - -func (s *Snapshot) NewIterator() driver.IIterator { - c, err := s.tx.CursorOpen(s.db) - if err != nil { - return &MDBIterator{nil, nil, nil, nil, false, err, false} - } - - return &MDBIterator{nil, nil, c, s.tx, true, nil, false} -} - -func (s *Snapshot) Close() { - s.tx.Commit() -} diff --git a/store/mdb/tx.go b/store/mdb/tx.go deleted file mode 100644 index 0d13034..0000000 --- a/store/mdb/tx.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build lmdb - -package mdb - -import ( - "github.com/siddontang/ledisdb/store/driver" - mdb "github.com/siddontang/ledisdb/vendor/gomdb" -) - -type Tx struct { - db mdb.DBI - tx *mdb.Txn -} - -func newTx(db MDB) (*Tx, error) { - tx, err := db.env.BeginTxn(nil, uint(0)) - if err != nil { - return nil, err - } - - return &Tx{db.db, tx}, nil -} - -func (t *Tx) Get(key []byte) ([]byte, error) { - v, err := t.tx.Get(t.db, key) - if err == mdb.NotFound { - return nil, nil - } - return v, err -} - -func (t *Tx) Put(key []byte, value []byte) error { - return t.tx.Put(t.db, key, value, mdb.NODUPDATA) -} - -func (t *Tx) Delete(key []byte) error { - return t.tx.Del(t.db, key, nil) -} - -func (t *Tx) NewIterator() driver.IIterator { - return t.newIterator() -} - -func (t *Tx) newIterator() *MDBIterator { - c, err := t.tx.CursorOpen(t.db) - if err != nil { - return &MDBIterator{nil, nil, nil, nil, false, err, false} - } - - return &MDBIterator{nil, nil, c, t.tx, true, nil, false} -} - -func (t *Tx) NewWriteBatch() driver.IWriteBatch { - return driver.NewWriteBatch(t) -} - -func (t *Tx) BatchPut(writes []driver.Write) error { - itr := t.newIterator() - - for _, w := range writes { - if w.Value == nil { - itr.key, itr.value, itr.err = itr.c.Get(w.Key, nil, mdb.SET) - if itr.err == nil { - itr.err = itr.c.Del(0) - } - } else { - itr.err = itr.c.Put(w.Key, w.Value, 0) - } - - if itr.err != nil && itr.err != mdb.NotFound { - break - } - } - itr.setState() - - return itr.Close() -} - -func (t *Tx) SyncBatchPut(writes []driver.Write) error { - return t.BatchPut(writes) -} - -func (t *Tx) Rollback() error { - t.tx.Abort() - return nil -} - -func (t *Tx) Commit() error { - return t.tx.Commit() -} diff --git a/store/store.go b/store/store.go index dbe843a..1352491 100644 --- a/store/store.go +++ b/store/store.go @@ -8,10 +8,8 @@ import ( "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/store/driver" - _ "github.com/siddontang/ledisdb/store/boltdb" _ 
"github.com/siddontang/ledisdb/store/goleveldb" _ "github.com/siddontang/ledisdb/store/leveldb" - _ "github.com/siddontang/ledisdb/store/mdb" _ "github.com/siddontang/ledisdb/store/rocksdb" ) diff --git a/vendor/README.md b/vendor/README.md deleted file mode 100644 index 25080ae..0000000 --- a/vendor/README.md +++ /dev/null @@ -1,3 +0,0 @@ -[godep](https://github.com/tools/godep) can not save packages which have build tags. - -So we put these packages here explicitly. \ No newline at end of file diff --git a/vendor/gomdb/LICENSE b/vendor/gomdb/LICENSE deleted file mode 100644 index 96fd7a2..0000000 --- a/vendor/gomdb/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Copyright (c) 2013, Ferenc Szalai -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gomdb/README.md b/vendor/gomdb/README.md deleted file mode 100644 index 573c203..0000000 --- a/vendor/gomdb/README.md +++ /dev/null @@ -1,28 +0,0 @@ -gomdb -===== - -Go wrapper for OpenLDAP Lightning Memory-Mapped Database (LMDB). -Read more about LMDB here: http://symas.com/mdb/ - -GoDoc available here: http://godoc.org/github.com/szferi/gomdb - -Build -======= - -`go get github.com/szferi/gomdb` - -There is no dependency on LMDB dynamic library. - -On FreeBSD 10, you must explicitly set `CC` (otherwise it will fail with a cryptic error), for example: - -`CC=clang go test -v` - -TODO -====== - - * write more documentation - * write more unit test - * benchmark - * figure out how can you write go binding for `MDB_comp_func` and `MDB_rel_func` - * Handle go `*Cursor` close with `txn.Commit` and `txn.Abort` transparently - diff --git a/vendor/gomdb/bench_test.go b/vendor/gomdb/bench_test.go deleted file mode 100644 index 8dd6f0b..0000000 --- a/vendor/gomdb/bench_test.go +++ /dev/null @@ -1,334 +0,0 @@ -// +build lmdb - -package mdb - -import ( - crand "crypto/rand" - "io/ioutil" - "math/rand" - "os" - "testing" -) - -// repeatedly put (overwrite) keys. 
-func BenchmarkTxnPut(b *testing.B) { - initRandSource(b) - env, path := setupBenchDB(b) - defer teardownBenchDB(b, env, path) - - dbi := openBenchDBI(b, env) - - var ps [][]byte - - rc := newRandSourceCursor() - txn, err := env.BeginTxn(nil, 0) - bMust(b, err, "starting transaction") - for i := 0; i < benchDBNumKeys; i++ { - k := makeBenchDBKey(&rc) - v := makeBenchDBVal(&rc) - err := txn.Put(dbi, k, v, 0) - ps = append(ps, k, v) - bTxnMust(b, txn, err, "putting data") - } - err = txn.Commit() - bMust(b, err, "committing transaction") - - txn, err = env.BeginTxn(nil, 0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - k := ps[rand.Intn(len(ps)/2)*2] - v := makeBenchDBVal(&rc) - err := txn.Put(dbi, k, v, 0) - bTxnMust(b, txn, err, "putting data") - } - b.StopTimer() - err = txn.Commit() - bMust(b, err, "committing transaction") -} - -// repeatedly get random keys. -func BenchmarkTxnGetRDONLY(b *testing.B) { - initRandSource(b) - env, path := setupBenchDB(b) - defer teardownBenchDB(b, env, path) - - dbi := openBenchDBI(b, env) - - var ps [][]byte - - rc := newRandSourceCursor() - txn, err := env.BeginTxn(nil, 0) - bMust(b, err, "starting transaction") - for i := 0; i < benchDBNumKeys; i++ { - k := makeBenchDBKey(&rc) - v := makeBenchDBVal(&rc) - err := txn.Put(dbi, k, v, 0) - ps = append(ps, k, v) - bTxnMust(b, txn, err, "putting data") - } - err = txn.Commit() - bMust(b, err, "committing transaction") - - txn, err = env.BeginTxn(nil, RDONLY) - bMust(b, err, "starting transaction") - defer txn.Abort() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := txn.Get(dbi, ps[rand.Intn(len(ps))]) - if err == NotFound { - continue - } - if err != nil { - b.Fatalf("error getting data: %v", err) - } - } - b.StopTimer() -} - -// like BenchmarkTxnGetRDONLY, but txn.GetVal() is called instead. -func BenchmarkTxnGetValRDONLY(b *testing.B) { - initRandSource(b) - env, path := setupBenchDB(b) - defer teardownBenchDB(b, env, path) - - dbi := openBenchDBI(b, env) - - var ps [][]byte - - rc := newRandSourceCursor() - txn, err := env.BeginTxn(nil, 0) - bMust(b, err, "starting transaction") - for i := 0; i < benchDBNumKeys; i++ { - k := makeBenchDBKey(&rc) - v := makeBenchDBVal(&rc) - err := txn.Put(dbi, k, v, 0) - ps = append(ps, k, v) - bTxnMust(b, txn, err, "putting data") - } - err = txn.Commit() - bMust(b, err, "committing transaction") - - txn, err = env.BeginTxn(nil, RDONLY) - bMust(b, err, "starting transaction") - defer txn.Abort() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := txn.GetVal(dbi, ps[rand.Intn(len(ps))]) - if err == NotFound { - continue - } - if err != nil { - b.Fatalf("error getting data: %v", err) - } - } - b.StopTimer() -} - -// repeatedly scan all the values in a database.
-func BenchmarkCursorScanRDONLY(b *testing.B) { - initRandSource(b) - env, path := setupBenchDB(b) - defer teardownBenchDB(b, env, path) - - dbi := openBenchDBI(b, env) - - var ps [][]byte - - rc := newRandSourceCursor() - txn, err := env.BeginTxn(nil, 0) - bMust(b, err, "starting transaction") - for i := 0; i < benchDBNumKeys; i++ { - k := makeBenchDBKey(&rc) - v := makeBenchDBVal(&rc) - err := txn.Put(dbi, k, v, 0) - ps = append(ps, k, v) - bTxnMust(b, txn, err, "putting data") - } - err = txn.Commit() - bMust(b, err, "committing transaction") - - txn, err = env.BeginTxn(nil, RDONLY) - bMust(b, err, "starting transaction") - defer txn.Abort() - b.ResetTimer() - for i := 0; i < b.N; i++ { - func() { - cur, err := txn.CursorOpen(dbi) - bMust(b, err, "opening cursor") - defer cur.Close() - var count int64 - for { - _, _, err := cur.Get(nil, nil, NEXT) - if err == NotFound { - return - } - if err != nil { - b.Fatalf("error getting data: %v", err) - } - count++ - } - if count != benchDBNumKeys { - b.Fatalf("unexpected number of keys: %d", count) - } - }() - } - b.StopTimer() -} - -// like BenchmarkCursorScanRDONLY, but cursor.GetVal() is called instead. -func BenchmarkCursorScanValRDONLY(b *testing.B) { - initRandSource(b) - env, path := setupBenchDB(b) - defer teardownBenchDB(b, env, path) - - dbi := openBenchDBI(b, env) - - var ps [][]byte - - rc := newRandSourceCursor() - txn, err := env.BeginTxn(nil, 0) - bMust(b, err, "starting transaction") - for i := 0; i < benchDBNumKeys; i++ { - k := makeBenchDBKey(&rc) - v := makeBenchDBVal(&rc) - err := txn.Put(dbi, k, v, 0) - ps = append(ps, k, v) - bTxnMust(b, txn, err, "putting data") - } - err = txn.Commit() - bMust(b, err, "committing transaction") - - txn, err = env.BeginTxn(nil, RDONLY) - bMust(b, err, "starting transaction") - defer txn.Abort() - b.ResetTimer() - for i := 0; i < b.N; i++ { - func() { - cur, err := txn.CursorOpen(dbi) - bMust(b, err, "opening cursor") - defer cur.Close() - var count int64 - for { - _, _, err := cur.GetVal(nil, nil, NEXT) - if err == NotFound { - return - } - if err != nil { - b.Fatalf("error getting data: %v", err) - } - count++ - } - if count != benchDBNumKeys { - b.Fatalf("unexpected number of keys: %d", count) - } - }() - } - b.StopTimer() -} - -func setupBenchDB(b *testing.B) (*Env, string) { - env, err := NewEnv() - bMust(b, err, "creating env") - err = env.SetMaxDBs(26) - bMust(b, err, "setting max dbs") - err = env.SetMapSize(1 << 30) // 1GB - bMust(b, err, "sizing env") - path, err := ioutil.TempDir("", "mdb_test-bench-") - bMust(b, err, "creating temp directory") - err = env.Open(path, 0, 0644) - if err != nil { - teardownBenchDB(b, env, path) - } - bMust(b, err, "opening database") - return env, path -} - -func openBenchDBI(b *testing.B, env *Env) DBI { - txn, err := env.BeginTxn(nil, 0) - bMust(b, err, "starting transaction") - name := "benchmark" - dbi, err := txn.DBIOpen(&name, CREATE) - if err != nil { - txn.Abort() - b.Fatalf("error opening dbi: %v", err) - } - err = txn.Commit() - bMust(b, err, "committing transaction") - return dbi -} - -func teardownBenchDB(b *testing.B, env *Env, path string) { - env.Close() - os.RemoveAll(path) -} - -func randBytes(n int) []byte { - p := make([]byte, n) - crand.Read(p) - return p -} - -func bMust(b *testing.B, err error, action string) { - if err != nil { - b.Fatalf("error %s: %v", action, err) - } -} - -func bTxnMust(b *testing.B, txn *Txn, err error, action string) { - if err != nil { - txn.Abort() - b.Fatalf("error %s: %v", action, err) - } -} - -const
randSourceSize = 500 << 20 // size of the 'entropy pool' for random byte generation. -const benchDBNumKeys = 100000 // number of keys to store in benchmark databases -const benchDBMaxKeyLen = 30 // maximum length for database keys (size is limited by MDB) -const benchDBMaxValLen = 2000 // maximum length for database values - -func makeBenchDBKey(c *randSourceCursor) []byte { - return c.NBytes(rand.Intn(benchDBMaxKeyLen) + 1) -} - -func makeBenchDBVal(c *randSourceCursor) []byte { - return c.NBytes(rand.Intn(benchDBMaxValLen) + 1) -} - -// holds a bunch of random bytes so repeated generation of 'random' slices is -// cheap. acts as a ring which can be read from (although doesn't implement io.Reader). -var randSource [randSourceSize]byte - -func initRandSource(b *testing.B) { - if randSource[0] == 0 && randSource[1] == 0 && randSource[2] == 0 && randSource[3] == 0 { - b.Logf("initializing random source data") - n, err := crand.Read(randSource[:]) - bMust(b, err, "initializing random source") - if n < len(randSource) { - b.Fatalf("unable to read enough random source data %d", n) - } - } -} - -// acts as a simple byte slice generator. -type randSourceCursor int - -func newRandSourceCursor() randSourceCursor { - i := rand.Intn(randSourceSize) - return randSourceCursor(i) -} - -func (c *randSourceCursor) NBytes(n int) []byte { - i := int(*c) - if n >= randSourceSize { - panic("rand size too big") - } - *c = (*c + randSourceCursor(n)) % randSourceSize - _n := i + n - randSourceSize - if _n > 0 { - p := make([]byte, n) - m := copy(p, randSource[i:]) - copy(p[m:], randSource[:]) - return p - } - return randSource[i : i+n] -} diff --git a/vendor/gomdb/cursor.go b/vendor/gomdb/cursor.go deleted file mode 100644 index ef97aac..0000000 --- a/vendor/gomdb/cursor.go +++ /dev/null @@ -1,105 +0,0 @@ -// +build lmdb - -package mdb - -/* -#cgo CFLAGS: -pthread -W -Wall -Wno-unused-parameter -Wbad-function-cast -O2 -g -#cgo freebsd CFLAGS: -DMDB_DSYNC=O_SYNC -#cgo openbsd CFLAGS: -DMDB_DSYNC=O_SYNC -#cgo netbsd CFLAGS: -DMDB_DSYNC=O_SYNC -#include <stdlib.h> -#include <stdio.h> -#include "lmdb.h" -*/ -import "C" - -import ( - "errors" -) - -// MDB_cursor_op -const ( - FIRST = iota - FIRST_DUP - GET_BOTH - GET_RANGE - GET_CURRENT - GET_MULTIPLE - LAST - LAST_DUP - NEXT - NEXT_DUP - NEXT_MULTIPLE - NEXT_NODUP - PREV - PREV_DUP - PREV_NODUP - SET - SET_KEY - SET_RANGE ) - -func (cursor *Cursor) Close() error { - if cursor._cursor == nil { - return errors.New("Cursor already closed") - } - C.mdb_cursor_close(cursor._cursor) - cursor._cursor = nil - return nil -} - -func (cursor *Cursor) Txn() *Txn { - var _txn *C.MDB_txn - _txn = C.mdb_cursor_txn(cursor._cursor) - if _txn != nil { - return &Txn{_txn} - } - return nil -} - -func (cursor *Cursor) DBI() DBI { - var _dbi C.MDB_dbi - _dbi = C.mdb_cursor_dbi(cursor._cursor) - return DBI(_dbi) -} - -// Retrieves the low-level MDB cursor.
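The constants above mirror LMDB's MDB_cursor_op enum by position, and the deleted store/mdb driver uses only a handful of them: SET for exact positioning (Delete, BatchPut), SET_RANGE for Seek, and FIRST/LAST/NEXT/PREV for iteration. A condensed sketch of the scan pattern, assuming a *Cursor obtained from Txn.CursorOpen inside this package (scanFrom and visit are illustrative names, not part of gomdb):

// scanFrom positions the cursor at the first key >= start (SET_RANGE) and
// walks forward (NEXT) until NotFound, the same pattern MDBIterator.Seek
// and MDBIterator.Next implement in the deleted driver.
func scanFrom(c *Cursor, start []byte, visit func(key, val []byte)) error {
	key, val, err := c.Get(start, nil, SET_RANGE)
	for err == nil {
		visit(key, val)
		key, val, err = c.Get(nil, nil, NEXT)
	}
	if err == NotFound { // end of keyspace, not a failure
		return nil
	}
	return err
}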
-func (cursor *Cursor) MdbCursor() *C.MDB_cursor { - return cursor._cursor -} - -func (cursor *Cursor) Get(set_key, sval []byte, op uint) (key, val []byte, err error) { - k, v, err := cursor.GetVal(set_key, sval, op) - if err != nil { - return nil, nil, err - } - return k.Bytes(), v.Bytes(), nil -} - -func (cursor *Cursor) GetVal(key, val []byte, op uint) (Val, Val, error) { - ckey := Wrap(key) - cval := Wrap(val) - ret := C.mdb_cursor_get(cursor._cursor, (*C.MDB_val)(&ckey), (*C.MDB_val)(&cval), C.MDB_cursor_op(op)) - return ckey, cval, errno(ret) -} - -func (cursor *Cursor) Put(key, val []byte, flags uint) error { - ckey := Wrap(key) - cval := Wrap(val) - ret := C.mdb_cursor_put(cursor._cursor, (*C.MDB_val)(&ckey), (*C.MDB_val)(&cval), C.uint(flags)) - return errno(ret) -} - -func (cursor *Cursor) Del(flags uint) error { - ret := C.mdb_cursor_del(cursor._cursor, C.uint(flags)) - return errno(ret) -} - -func (cursor *Cursor) Count() (uint64, error) { - var _size C.size_t - ret := C.mdb_cursor_count(cursor._cursor, &_size) - if ret != SUCCESS { - return 0, errno(ret) - } - return uint64(_size), nil -} diff --git a/vendor/gomdb/env.go b/vendor/gomdb/env.go deleted file mode 100644 index 531833c..0000000 --- a/vendor/gomdb/env.go +++ /dev/null @@ -1,229 +0,0 @@ -// +build lmdb - -package mdb - -/* -#cgo CFLAGS: -pthread -W -Wall -Wno-unused-parameter -Wbad-function-cast -O2 -g -#cgo freebsd CFLAGS: -DMDB_DSYNC=O_SYNC -#cgo openbsd CFLAGS: -DMDB_DSYNC=O_SYNC -#cgo netbsd CFLAGS: -DMDB_DSYNC=O_SYNC -#include <stdlib.h> -#include <stdio.h> -#include "lmdb.h" -*/ -import "C" - -import ( - "errors" - "fmt" - "syscall" - "unsafe" ) - -const SUCCESS = C.MDB_SUCCESS - -// mdb_env Environment Flags -const ( - FIXEDMAP = C.MDB_FIXEDMAP // mmap at a fixed address (experimental) - NOSUBDIR = C.MDB_NOSUBDIR // no environment directory - NOSYNC = C.MDB_NOSYNC // don't fsync after commit - RDONLY = C.MDB_RDONLY // read only - NOMETASYNC = C.MDB_NOMETASYNC // don't fsync metapage after commit - WRITEMAP = C.MDB_WRITEMAP // use writable mmap - MAPASYNC = C.MDB_MAPASYNC // use asynchronous msync when MDB_WRITEMAP is used - NOTLS = C.MDB_NOTLS // tie reader locktable slots to Txn objects instead of threads ) - -type DBI uint - -type Errno C.int - -// minimum and maximum values produced for the Errno type. syscall.Errnos of -// other values may still be produced.
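That split is the crux of gomdb's error model: LMDB's own codes surface as Errno sentinels such as NotFound, while anything outside the range below falls through to syscall.Errno. The deleted driver's MDB.Get is the canonical consumer of this convention; a condensed sketch, with the Env/DBI plumbing assumed to exist:

// get treats NotFound as "key absent" rather than as an error, exactly the
// convention MDB.Get in the deleted store/mdb driver follows.
func get(env *Env, dbi DBI, key []byte) ([]byte, error) {
	tx, err := env.BeginTxn(nil, RDONLY)
	if err != nil {
		return nil, err
	}
	defer tx.Commit() // committing a read-only txn just releases its reader slot

	v, err := tx.Get(dbi, key)
	if err == NotFound {
		return nil, nil
	}
	return v, err
}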
-const minErrno, maxErrno C.int = C.MDB_KEYEXIST, C.MDB_LAST_ERRCODE - -func (e Errno) Error() string { - s := C.GoString(C.mdb_strerror(C.int(e))) - if s == "" { - return fmt.Sprint("mdb errno:", int(e)) - } - return s -} - -// for tests that can't import C -func _errno(ret int) error { - return errno(C.int(ret)) -} - -func errno(ret C.int) error { - if ret == C.MDB_SUCCESS { - return nil - } - if minErrno <= ret && ret <= maxErrno { - return Errno(ret) - } - return syscall.Errno(ret) -} - -// error codes -const ( - KeyExist = Errno(C.MDB_KEYEXIST) - NotFound = Errno(C.MDB_NOTFOUND) - PageNotFound = Errno(C.MDB_PAGE_NOTFOUND) - Corrupted = Errno(C.MDB_CORRUPTED) - Panic = Errno(C.MDB_PANIC) - VersionMismatch = Errno(C.MDB_VERSION_MISMATCH) - Invalid = Errno(C.MDB_INVALID) - MapFull = Errno(C.MDB_MAP_FULL) - DbsFull = Errno(C.MDB_DBS_FULL) - ReadersFull = Errno(C.MDB_READERS_FULL) - TlsFull = Errno(C.MDB_TLS_FULL) - TxnFull = Errno(C.MDB_TXN_FULL) - CursorFull = Errno(C.MDB_CURSOR_FULL) - PageFull = Errno(C.MDB_PAGE_FULL) - MapResized = Errno(C.MDB_MAP_RESIZED) - Incompatibile = Errno(C.MDB_INCOMPATIBLE) -) - -func Version() string { - var major, minor, patch *C.int - ver_str := C.mdb_version(major, minor, patch) - return C.GoString(ver_str) -} - -// Env is opaque structure for a database environment. -// A DB environment supports multiple databases, all residing in the -// same shared-memory map. -type Env struct { - _env *C.MDB_env -} - -// Create an MDB environment handle. -func NewEnv() (*Env, error) { - var _env *C.MDB_env - ret := C.mdb_env_create(&_env) - if ret != SUCCESS { - return nil, errno(ret) - } - return &Env{_env}, nil -} - -// Open an environment handle. If this function fails Close() must be called to discard the Env handle. -func (env *Env) Open(path string, flags uint, mode uint) error { - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - ret := C.mdb_env_open(env._env, cpath, C.uint(NOTLS|flags), C.mdb_mode_t(mode)) - return errno(ret) -} - -func (env *Env) Close() error { - if env._env == nil { - return errors.New("Environment already closed") - } - C.mdb_env_close(env._env) - env._env = nil - return nil -} - -func (env *Env) Copy(path string) error { - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - ret := C.mdb_env_copy(env._env, cpath) - return errno(ret) -} - -// Statistics for a database in the environment -type Stat struct { - PSize uint // Size of a database page. This is currently the same for all databases. 
- Depth uint // Depth (height) of the B-tree - BranchPages uint64 // Number of internal (non-leaf) pages - LeafPages uint64 // Number of leaf pages - OverflowPages uint64 // Number of overflow pages - Entries uint64 // Number of data items -} - -func (env *Env) Stat() (*Stat, error) { - var _stat C.MDB_stat - ret := C.mdb_env_stat(env._env, &_stat) - if ret != SUCCESS { - return nil, errno(ret) - } - stat := Stat{PSize: uint(_stat.ms_psize), - Depth: uint(_stat.ms_depth), - BranchPages: uint64(_stat.ms_branch_pages), - LeafPages: uint64(_stat.ms_leaf_pages), - OverflowPages: uint64(_stat.ms_overflow_pages), - Entries: uint64(_stat.ms_entries)} - return &stat, nil -} - -type Info struct { - MapSize uint64 // Size of the data memory map - LastPNO uint64 // ID of the last used page - LastTxnID uint64 // ID of the last committed transaction - MaxReaders uint // maximum number of threads for the environment - NumReaders uint // maximum number of threads used in the environment -} - -func (env *Env) Info() (*Info, error) { - var _info C.MDB_envinfo - ret := C.mdb_env_info(env._env, &_info) - if ret != SUCCESS { - return nil, errno(ret) - } - info := Info{MapSize: uint64(_info.me_mapsize), - LastPNO: uint64(_info.me_last_pgno), - LastTxnID: uint64(_info.me_last_txnid), - MaxReaders: uint(_info.me_maxreaders), - NumReaders: uint(_info.me_numreaders)} - return &info, nil -} - -func (env *Env) Sync(force int) error { - ret := C.mdb_env_sync(env._env, C.int(force)) - return errno(ret) -} - -func (env *Env) SetFlags(flags uint, onoff int) error { - ret := C.mdb_env_set_flags(env._env, C.uint(flags), C.int(onoff)) - return errno(ret) -} - -func (env *Env) Flags() (uint, error) { - var _flags C.uint - ret := C.mdb_env_get_flags(env._env, &_flags) - if ret != SUCCESS { - return 0, errno(ret) - } - return uint(_flags), nil -} - -func (env *Env) Path() (string, error) { - var path string - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - ret := C.mdb_env_get_path(env._env, &cpath) - if ret != SUCCESS { - return "", errno(ret) - } - return C.GoString(cpath), nil -} - -func (env *Env) SetMapSize(size uint64) error { - ret := C.mdb_env_set_mapsize(env._env, C.size_t(size)) - return errno(ret) -} - -func (env *Env) SetMaxReaders(size uint) error { - ret := C.mdb_env_set_maxreaders(env._env, C.uint(size)) - return errno(ret) -} - -func (env *Env) SetMaxDBs(size DBI) error { - ret := C.mdb_env_set_maxdbs(env._env, C.MDB_dbi(size)) - return errno(ret) -} - -func (env *Env) DBIClose(dbi DBI) { - C.mdb_dbi_close(env._env, C.MDB_dbi(dbi)) -} diff --git a/vendor/gomdb/env_test.go b/vendor/gomdb/env_test.go deleted file mode 100644 index 8225eaf..0000000 --- a/vendor/gomdb/env_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build lmdb - -package mdb - -import ( - "io/ioutil" - "os" - "testing" ) - -func TestEnvOpen(t *testing.T) { - env, err := NewEnv() - if err != nil { - t.Errorf("Cannot create environment: %s", err) - } - err = env.Open("adsjgfadsfjg", 0, 0664) - if err == nil { - t.Errorf("should not be able to open") - } - path, err := ioutil.TempDir("/tmp", "mdb_test") - if err != nil { - t.Errorf("Cannot create temporary directory") - } - err = os.MkdirAll(path, 0770) - if err != nil { - t.Errorf("Cannot create directory: %s", path) - } - err = env.Open(path, 0, 0664) - if err != nil { - t.Errorf("Cannot open environment: %s", err) - } - err = env.Close() - if err != nil { - t.Errorf("Error during close of environment: %s", err) - } - // clean up - os.RemoveAll(path) -} - -func setup(t *testing.T)
*Env { - env, err := NewEnv() - if err != nil { - t.Errorf("Cannot create environment: %s", err) - } - path, err := ioutil.TempDir("/tmp", "mdb_test") - if err != nil { - t.Errorf("Cannot create temporary directory") - } - err = os.MkdirAll(path, 0770) - if err != nil { - t.Errorf("Cannot create directory: %s", path) - } - err = env.Open(path, 0, 0664) - if err != nil { - t.Errorf("Cannot open environment: %s", err) - } - - return env -} - -func clean(env *Env, t *testing.T) { - path, err := env.Path() - if err != nil { - t.Errorf("Cannot get path") - } - if path == "" { - t.Errorf("Invalid path") - } - t.Logf("Env path: %s", path) - err = env.Close() - if err != nil { - t.Errorf("Error during close of environment: %s", err) - } - // clean up - os.RemoveAll(path) -} - -func TestEnvCopy(t *testing.T) { - env := setup(t) - clean(env, t) -} diff --git a/vendor/gomdb/error_test.go b/vendor/gomdb/error_test.go deleted file mode 100644 index 3c87745..0000000 --- a/vendor/gomdb/error_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build lmdb - -package mdb - -import ( - "syscall" - "testing" ) - -func TestErrno(t *testing.T) { - zeroerr := errno(0) - if zeroerr != nil { - t.Errorf("errno(0) != nil: %#v", zeroerr) - } - syserr := _errno(int(syscall.EINVAL)) - if syserr != syscall.EINVAL { // fails if syserr is Errno(syscall.EINVAL) - t.Errorf("errno(syscall.EINVAL) != syscall.EINVAL: %#v", syserr) - } - mdberr := _errno(int(KeyExist)) - if mdberr != KeyExist { // fails if mdberr is not KeyExist - t.Errorf("errno(KeyExist) != KeyExist: %#v", mdberr) - } -} diff --git a/vendor/gomdb/example_test.go b/vendor/gomdb/example_test.go deleted file mode 100644 index 817e273..0000000 --- a/vendor/gomdb/example_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build lmdb - -package mdb - -import ( - "fmt" - "io/ioutil" - "os" ) - -// Most mdb functions/methods can return errors. This example ignores errors -// for brevity. Real code should check all return values.
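For contrast with the deliberately unchecked Example() that follows, here is what its setup steps look like with every return value handled; a sketch only, reusing the example's own values (map size, mode) and an illustrative helper name:

// openExampleEnv is a checked variant of Example()'s setup: each error the
// example ignores for brevity is surfaced, and partially-built state is
// torn down on the way out.
func openExampleEnv(path string) (*Env, DBI, error) {
	env, err := NewEnv()
	if err != nil {
		return nil, 0, err
	}
	if err := env.SetMapSize(1 << 20); err != nil { // max file size, as in Example()
		env.Close()
		return nil, 0, err
	}
	if err := env.Open(path, 0, 0664); err != nil {
		env.Close()
		return nil, 0, err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		env.Close()
		return nil, 0, err
	}
	dbi, err := txn.DBIOpen(nil, 0)
	if err != nil {
		txn.Abort()
		env.Close()
		return nil, 0, err
	}
	if err := txn.Commit(); err != nil {
		env.Close()
		return nil, 0, err
	}
	return env, dbi, nil
}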
-func Example() { - // create a directory to hold the database - path, _ := ioutil.TempDir("", "mdb_test") - defer os.RemoveAll(path) - - // open the db - env, _ := NewEnv() - env.SetMapSize(1 << 20) // max file size - env.Open(path, 0, 0664) - defer env.Close() - txn, _ := env.BeginTxn(nil, 0) - dbi, _ := txn.DBIOpen(nil, 0) - defer env.DBIClose(dbi) - txn.Commit() - - // write some data - txn, _ = env.BeginTxn(nil, 0) - num_entries := 5 - for i := 0; i < num_entries; i++ { - key := fmt.Sprintf("Key-%d", i) - val := fmt.Sprintf("Val-%d", i) - txn.Put(dbi, []byte(key), []byte(val), 0) - } - txn.Commit() - - // inspect the database - stat, _ := env.Stat() - fmt.Println(stat.Entries) - - // scan the database - txn, _ = env.BeginTxn(nil, RDONLY) - defer txn.Abort() - cursor, _ := txn.CursorOpen(dbi) - defer cursor.Close() - for { - bkey, bval, err := cursor.Get(nil, nil, NEXT) - if err == NotFound { - break - } - if err != nil { - panic(err) - } - fmt.Printf("%s: %s\n", bkey, bval) - } - - // random access - bval, _ := txn.Get(dbi, []byte("Key-3")) - fmt.Println(string(bval)) - - // Output: - // 5 - // Key-0: Val-0 - // Key-1: Val-1 - // Key-2: Val-2 - // Key-3: Val-3 - // Key-4: Val-4 - // Val-3 -} diff --git a/vendor/gomdb/lmdb.h b/vendor/gomdb/lmdb.h deleted file mode 100644 index c00fcae..0000000 --- a/vendor/gomdb/lmdb.h +++ /dev/null @@ -1,1555 +0,0 @@ -// +build lmdb - -/** @file lmdb.h - * @brief Lightning memory-mapped database library - * - * @mainpage Lightning Memory-Mapped Database Manager (LMDB) - * - * @section intro_sec Introduction - * LMDB is a Btree-based database management library modeled loosely on the - * BerkeleyDB API, but much simplified. The entire database is exposed - * in a memory map, and all data fetches return data directly - * from the mapped memory, so no malloc's or memcpy's occur during - * data fetches. As such, the library is extremely simple because it - * requires no page caching layer of its own, and it is extremely high - * performance and memory-efficient. It is also fully transactional with - * full ACID semantics, and when the memory map is read-only, the - * database integrity cannot be corrupted by stray pointer writes from - * application code. - * - * The library is fully thread-aware and supports concurrent read/write - * access from multiple processes and threads. Data pages use a copy-on- - * write strategy so no active data pages are ever overwritten, which - * also provides resistance to corruption and eliminates the need of any - * special recovery procedures after a system crash. Writes are fully - * serialized; only one write transaction may be active at a time, which - * guarantees that writers can never deadlock. The database structure is - * multi-versioned so readers run with no locks; writers cannot block - * readers, and readers don't block writers. - * - * Unlike other well-known database mechanisms which use either write-ahead - * transaction logs or append-only data writes, LMDB requires no maintenance - * during operation. Both write-ahead loggers and append-only databases - * require periodic checkpointing and/or compaction of their log or database - * files otherwise they grow without bound. LMDB tracks free pages within - * the database and re-uses them for new write operations, so the database - * size does not grow without bound in normal use. - * - * The memory map can be used as a read-only or read-write map. It is - * read-only by default as this provides total immunity to corruption. 
- * Using read-write mode offers much higher write performance, but adds - * the possibility for stray application writes thru pointers to silently - * corrupt the database. Of course if your application code is known to - * be bug-free (...) then this is not an issue. - * - * @section caveats_sec Caveats - * Troubleshooting the lock file, plus semaphores on BSD systems: - * - * - A broken lockfile can cause sync issues. - * Stale reader transactions left behind by an aborted program - * cause further writes to grow the database quickly, and - * stale locks can block further operation. - * - * Fix: Check for stale readers periodically, using the - * #mdb_reader_check function or the \ref mdb_stat_1 "mdb_stat" tool. Or just - * make all programs using the database close it; the lockfile - * is always reset on first open of the environment. - * - * - On BSD systems or others configured with MDB_USE_POSIX_SEM, - * startup can fail due to semaphores owned by another userid. - * - * Fix: Open and close the database as the user which owns the - * semaphores (likely last user) or as root, while no other - * process is using the database. - * - * Restrictions/caveats (in addition to those listed for some functions): - * - * - Only the database owner should normally use the database on - * BSD systems or when otherwise configured with MDB_USE_POSIX_SEM. - * Multiple users can cause startup to fail later, as noted above. - * - * - There is normally no pure read-only mode, since readers need write - * access to locks and lock file. Exceptions: On read-only filesystems - * or with the #MDB_NOLOCK flag described under #mdb_env_open(). - * - * - By default, in versions before 0.9.10, unused portions of the data - * file might receive garbage data from memory freed by other code. - * (This does not happen when using the #MDB_WRITEMAP flag.) As of - * 0.9.10 the default behavior is to initialize such memory before - * writing to the data file. Since there may be a slight performance - * cost due to this initialization, applications may disable it using - * the #MDB_NOMEMINIT flag. Applications handling sensitive data - * which must not be written should not use this flag. This flag is - * irrelevant when using #MDB_WRITEMAP. - * - * - A thread can only use one transaction at a time, plus any child - * transactions. Each transaction belongs to one thread. See below. - * The #MDB_NOTLS flag changes this for read-only transactions. - * - * - Use an MDB_env* in the process which opened it, without fork()ing. - * - * - Do not have open an LMDB database twice in the same process at - * the same time. Not even from a plain open() call - close()ing it - * breaks flock() advisory locking. - * - * - Avoid long-lived transactions. Read transactions prevent - * reuse of pages freed by newer write transactions, thus the - * database can grow quickly. Write transactions prevent - * other write transactions, since writes are serialized. - * - * - Avoid suspending a process with active transactions. These - * would then be "long-lived" as above. Also read transactions - * suspended when writers commit could sometimes see wrong data. - * - * ...when several processes can use a database concurrently: - * - * - Avoid aborting a process with an active transaction. - * The transaction becomes "long-lived" as above until a check - * for stale readers is performed or the lockfile is reset, - * since the process may not remove it from the lockfile. - * - * - If you do that anyway, do a periodic check for stale readers. 
Or - * close the environment once in a while, so the lockfile can get reset. - * - * - Do not use LMDB databases on remote filesystems, even between - * processes on the same host. This breaks flock() on some OSes, - * possibly memory map sync, and certainly sync between programs - * on different hosts. - * - * - Opening a database can fail if another process is opening or - * closing it at exactly the same time. - * - * @author Howard Chu, Symas Corporation. - * - * @copyright Copyright 2011-2014 Howard Chu, Symas Corp. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - * - * @par Derived From: - * This code is derived from btree.c written by Martin Hedenfalk. - * - * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#ifndef _LMDB_H_ -#define _LMDB_H_ - -#include <sys/types.h> - -#ifdef __cplusplus -extern "C" { -#endif - -/** Unix permissions for creating files, or dummy definition for Windows */ -#ifdef _MSC_VER -typedef int mdb_mode_t; -#else -typedef mode_t mdb_mode_t; -#endif - -/** An abstraction for a file handle. - * On POSIX systems file handles are small integers. On Windows - * they're opaque pointers. - */ -#ifdef _WIN32 -typedef void *mdb_filehandle_t; -#else -typedef int mdb_filehandle_t; -#endif - -/** @defgroup mdb LMDB API - * @{ - * @brief OpenLDAP Lightning Memory-Mapped Database Manager - */ -/** @defgroup Version Version Macros - * @{ - */ -/** Library major version */ -#define MDB_VERSION_MAJOR 0 -/** Library minor version */ -#define MDB_VERSION_MINOR 9 -/** Library patch version */ -#define MDB_VERSION_PATCH 14 - -/** Combine args a,b,c into a single integer for easy version comparisons */ -#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c)) - -/** The full library version as a single integer */ -#define MDB_VERSION_FULL \ - MDB_VERINT(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH) - -/** The release date of this library version */ -#define MDB_VERSION_DATE "July 24, 2014" - -/** A stringifier for the version info */ -#define MDB_VERSTR(a,b,c,d) "LMDB " #a "." #b "." #c ": (" d ")" - -/** A helper for the stringifier macro */ -#define MDB_VERFOO(a,b,c,d) MDB_VERSTR(a,b,c,d) - -/** The full library version as a C string */ -#define MDB_VERSION_STRING \ - MDB_VERFOO(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH,MDB_VERSION_DATE) -/** @} */ - -/** @brief Opaque structure for a database environment. - * - * A DB environment supports multiple databases, all residing in the same - * shared-memory map.
- */ -typedef struct MDB_env MDB_env; - -/** @brief Opaque structure for a transaction handle. - * - * All database operations require a transaction handle. Transactions may be - * read-only or read-write. - */ -typedef struct MDB_txn MDB_txn; - -/** @brief A handle for an individual database in the DB environment. */ -typedef unsigned int MDB_dbi; - -/** @brief Opaque structure for navigating through a database */ -typedef struct MDB_cursor MDB_cursor; - -/** @brief Generic structure used for passing keys and data in and out - * of the database. - * - * Values returned from the database are valid only until a subsequent - * update operation, or the end of the transaction. Do not modify or - * free them, they commonly point into the database itself. - * - * Key sizes must be between 1 and #mdb_env_get_maxkeysize() inclusive. - * The same applies to data sizes in databases with the #MDB_DUPSORT flag. - * Other data items can in theory be from 0 to 0xffffffff bytes long. - */ -typedef struct MDB_val { - size_t mv_size; /**< size of the data item */ - void *mv_data; /**< address of the data item */ -} MDB_val; - -/** @brief A callback function used to compare two keys in a database */ -typedef int (MDB_cmp_func)(const MDB_val *a, const MDB_val *b); - -/** @brief A callback function used to relocate a position-dependent data item - * in a fixed-address database. - * - * The \b newptr gives the item's desired address in - * the memory map, and \b oldptr gives its previous address. The item's actual - * data resides at the address in \b item. This callback is expected to walk - * through the fields of the record in \b item and modify any - * values based at the \b oldptr address to be relative to the \b newptr address. - * @param[in,out] item The item that is to be relocated. - * @param[in] oldptr The previous address. - * @param[in] newptr The new address to relocate to. - * @param[in] relctx An application-provided context, set by #mdb_set_relctx(). - * @todo This feature is currently unimplemented. - */ -typedef void (MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx); - -/** @defgroup mdb_env Environment Flags - * @{ - */ - /** mmap at a fixed address (experimental) */ -#define MDB_FIXEDMAP 0x01 - /** no environment directory */ -#define MDB_NOSUBDIR 0x4000 - /** don't fsync after commit */ -#define MDB_NOSYNC 0x10000 - /** read only */ -#define MDB_RDONLY 0x20000 - /** don't fsync metapage after commit */ -#define MDB_NOMETASYNC 0x40000 - /** use writable mmap */ -#define MDB_WRITEMAP 0x80000 - /** use asynchronous msync when #MDB_WRITEMAP is used */ -#define MDB_MAPASYNC 0x100000 - /** tie reader locktable slots to #MDB_txn objects instead of to threads */ -#define MDB_NOTLS 0x200000 - /** don't do any locking, caller must manage their own locks */ -#define MDB_NOLOCK 0x400000 - /** don't do readahead (no effect on Windows) */ -#define MDB_NORDAHEAD 0x800000 - /** don't initialize malloc'd memory before writing to datafile */ -#define MDB_NOMEMINIT 0x1000000 -/** @} */ - -/** @defgroup mdb_dbi_open Database Flags - * @{ - */ - /** use reverse string keys */ -#define MDB_REVERSEKEY 0x02 - /** use sorted duplicates */ -#define MDB_DUPSORT 0x04 - /** numeric keys in native byte order. - * The keys must all be of the same size. 
*/ -#define MDB_INTEGERKEY 0x08 - /** with #MDB_DUPSORT, sorted dup items have fixed size */ -#define MDB_DUPFIXED 0x10 - /** with #MDB_DUPSORT, dups are numeric in native byte order */ -#define MDB_INTEGERDUP 0x20 - /** with #MDB_DUPSORT, use reverse string dups */ -#define MDB_REVERSEDUP 0x40 - /** create DB if not already existing */ -#define MDB_CREATE 0x40000 -/** @} */ - -/** @defgroup mdb_put Write Flags - * @{ - */ -/** For put: Don't write if the key already exists. */ -#define MDB_NOOVERWRITE 0x10 -/** Only for #MDB_DUPSORT
- * For put: don't write if the key and data pair already exist.
- * For mdb_cursor_del: remove all duplicate data items. - */ -#define MDB_NODUPDATA 0x20 -/** For mdb_cursor_put: overwrite the current key/data pair */ -#define MDB_CURRENT 0x40 -/** For put: Just reserve space for data, don't copy it. Return a - * pointer to the reserved space. - */ -#define MDB_RESERVE 0x10000 -/** Data is being appended, don't split full pages. */ -#define MDB_APPEND 0x20000 -/** Duplicate data is being appended, don't split full pages. */ -#define MDB_APPENDDUP 0x40000 -/** Store multiple data items in one call. Only for #MDB_DUPFIXED. */ -#define MDB_MULTIPLE 0x80000 -/* @} */ - -/** @defgroup mdb_copy Copy Flags - * @{ - */ -/** Compacting copy: Omit free space from copy, and renumber all - * pages sequentially. - */ -#define MDB_CP_COMPACT 0x01 -/* @} */ - -/** @brief Cursor Get operations. - * - * This is the set of all operations for retrieving data - * using a cursor. - */ -typedef enum MDB_cursor_op { - MDB_FIRST, /**< Position at first key/data item */ - MDB_FIRST_DUP, /**< Position at first data item of current key. - Only for #MDB_DUPSORT */ - MDB_GET_BOTH, /**< Position at key/data pair. Only for #MDB_DUPSORT */ - MDB_GET_BOTH_RANGE, /**< position at key, nearest data. Only for #MDB_DUPSORT */ - MDB_GET_CURRENT, /**< Return key/data at current cursor position */ - MDB_GET_MULTIPLE, /**< Return key and up to a page of duplicate data items - from current cursor position. Move cursor to prepare - for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ - MDB_LAST, /**< Position at last key/data item */ - MDB_LAST_DUP, /**< Position at last data item of current key. - Only for #MDB_DUPSORT */ - MDB_NEXT, /**< Position at next data item */ - MDB_NEXT_DUP, /**< Position at next data item of current key. - Only for #MDB_DUPSORT */ - MDB_NEXT_MULTIPLE, /**< Return key and up to a page of duplicate data items - from next cursor position. Move cursor to prepare - for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ - MDB_NEXT_NODUP, /**< Position at first data item of next key */ - MDB_PREV, /**< Position at previous data item */ - MDB_PREV_DUP, /**< Position at previous data item of current key. - Only for #MDB_DUPSORT */ - MDB_PREV_NODUP, /**< Position at last data item of previous key */ - MDB_SET, /**< Position at specified key */ - MDB_SET_KEY, /**< Position at specified key, return key + data */ - MDB_SET_RANGE /**< Position at first key greater than or equal to specified key. 
*/ -} MDB_cursor_op; - -/** @defgroup errors Return Codes - * - * BerkeleyDB uses -30800 to -30999, we'll go under them - * @{ - */ - /** Successful result */ -#define MDB_SUCCESS 0 - /** key/data pair already exists */ -#define MDB_KEYEXIST (-30799) - /** key/data pair not found (EOF) */ -#define MDB_NOTFOUND (-30798) - /** Requested page not found - this usually indicates corruption */ -#define MDB_PAGE_NOTFOUND (-30797) - /** Located page was wrong type */ -#define MDB_CORRUPTED (-30796) - /** Update of meta page failed, probably I/O error */ -#define MDB_PANIC (-30795) - /** Environment version mismatch */ -#define MDB_VERSION_MISMATCH (-30794) - /** File is not a valid LMDB file */ -#define MDB_INVALID (-30793) - /** Environment mapsize reached */ -#define MDB_MAP_FULL (-30792) - /** Environment maxdbs reached */ -#define MDB_DBS_FULL (-30791) - /** Environment maxreaders reached */ -#define MDB_READERS_FULL (-30790) - /** Too many TLS keys in use - Windows only */ -#define MDB_TLS_FULL (-30789) - /** Txn has too many dirty pages */ -#define MDB_TXN_FULL (-30788) - /** Cursor stack too deep - internal error */ -#define MDB_CURSOR_FULL (-30787) - /** Page has not enough space - internal error */ -#define MDB_PAGE_FULL (-30786) - /** Database contents grew beyond environment mapsize */ -#define MDB_MAP_RESIZED (-30785) - /** MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed */ -#define MDB_INCOMPATIBLE (-30784) - /** Invalid reuse of reader locktable slot */ -#define MDB_BAD_RSLOT (-30783) - /** Transaction cannot recover - it must be aborted */ -#define MDB_BAD_TXN (-30782) - /** Unsupported size of key/DB name/data, or wrong DUPFIXED size */ -#define MDB_BAD_VALSIZE (-30781) - /** The specified DBI was changed unexpectedly */ -#define MDB_BAD_DBI (-30780) - /** The last defined error code */ -#define MDB_LAST_ERRCODE MDB_BAD_DBI -/** @} */ - -/** @brief Statistics for a database in the environment */ -typedef struct MDB_stat { - unsigned int ms_psize; /**< Size of a database page. - This is currently the same for all databases. */ - unsigned int ms_depth; /**< Depth (height) of the B-tree */ - size_t ms_branch_pages; /**< Number of internal (non-leaf) pages */ - size_t ms_leaf_pages; /**< Number of leaf pages */ - size_t ms_overflow_pages; /**< Number of overflow pages */ - size_t ms_entries; /**< Number of data items */ -} MDB_stat; - -/** @brief Information about the environment */ -typedef struct MDB_envinfo { - void *me_mapaddr; /**< Address of map, if fixed */ - size_t me_mapsize; /**< Size of the data memory map */ - size_t me_last_pgno; /**< ID of the last used page */ - size_t me_last_txnid; /**< ID of the last committed transaction */ - unsigned int me_maxreaders; /**< max reader slots in the environment */ - unsigned int me_numreaders; /**< max reader slots used in the environment */ -} MDB_envinfo; - - /** @brief Return the LMDB library version information. - * - * @param[out] major if non-NULL, the library major version number is copied here - * @param[out] minor if non-NULL, the library minor version number is copied here - * @param[out] patch if non-NULL, the library patch version number is copied here - * @retval "version string" The library version as a string - */ -char *mdb_version(int *major, int *minor, int *patch); - - /** @brief Return a string describing a given error code. - * - * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3) - * function. 
If the error code is greater than or equal to 0, then the string - * returned by the system function strerror(3) is returned. If the error code - * is less than 0, an error string corresponding to the LMDB library error is - * returned. See @ref errors for a list of LMDB-specific error codes. - * @param[in] err The error code - * @retval "error message" The description of the error - */ -char *mdb_strerror(int err); - - /** @brief Create an LMDB environment handle. - * - * This function allocates memory for a #MDB_env structure. To release - * the allocated memory and discard the handle, call #mdb_env_close(). - * Before the handle may be used, it must be opened using #mdb_env_open(). - * Various other options may also need to be set before opening the handle, - * e.g. #mdb_env_set_mapsize(), #mdb_env_set_maxreaders(), #mdb_env_set_maxdbs(), - * depending on usage requirements. - * @param[out] env The address where the new handle will be stored - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_create(MDB_env **env); - - /** @brief Open an environment handle. - * - * If this function fails, #mdb_env_close() must be called to discard the #MDB_env handle. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] path The directory in which the database files reside. This - * directory must already exist and be writable. - * @param[in] flags Special options for this environment. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - * Flags set by mdb_env_set_flags() are also used. - *

    - *
  • #MDB_FIXEDMAP - * use a fixed address for the mmap region. This flag must be specified - * when creating the environment, and is stored persistently in the environment. - * If successful, the memory map will always reside at the same virtual address - * and pointers used to reference data items in the database will be constant - * across multiple invocations. This option may not always work, depending on - * how the operating system has allocated memory to shared libraries and other uses. - * The feature is highly experimental. - *
  • #MDB_NOSUBDIR - * By default, LMDB creates its environment in a directory whose - * pathname is given in \b path, and creates its data and lock files - * under that directory. With this option, \b path is used as-is for - * the database main data file. The database lock file is the \b path - * with "-lock" appended. - *
  • #MDB_RDONLY - * Open the environment in read-only mode. No write operations will be - * allowed. LMDB will still modify the lock file - except on read-only - * filesystems, where LMDB does not use locks. - *
  • #MDB_WRITEMAP - * Use a writeable memory map unless MDB_RDONLY is set. This is faster - * and uses fewer mallocs, but loses protection from application bugs - * like wild pointer writes and other bad updates into the database. - * Incompatible with nested transactions. - * Processes with and without MDB_WRITEMAP on the same environment do - * not cooperate well. - *
  • #MDB_NOMETASYNC - * Flush system buffers to disk only once per transaction, omit the - * metadata flush. Defer that until the system flushes files to disk, - * or next non-MDB_RDONLY commit or #mdb_env_sync(). This optimization - * maintains database integrity, but a system crash may undo the last - * committed transaction. I.e. it preserves the ACI (atomicity, - * consistency, isolation) but not D (durability) database property. - * This flag may be changed at any time using #mdb_env_set_flags(). - *
  • #MDB_NOSYNC - * Don't flush system buffers to disk when committing a transaction. - * This optimization means a system crash can corrupt the database or - * lose the last transactions if buffers are not yet flushed to disk. - * The risk is governed by how often the system flushes dirty buffers - * to disk and how often #mdb_env_sync() is called. However, if the - * filesystem preserves write order and the #MDB_WRITEMAP flag is not - * used, transactions exhibit ACI (atomicity, consistency, isolation) - * properties and only lose D (durability). I.e. database integrity - * is maintained, but a system crash may undo the final transactions. - * Note that (#MDB_NOSYNC | #MDB_WRITEMAP) leaves the system with no - * hint for when to write transactions to disk, unless #mdb_env_sync() - * is called. (#MDB_MAPASYNC | #MDB_WRITEMAP) may be preferable. - * This flag may be changed at any time using #mdb_env_set_flags(). - *
  • #MDB_MAPASYNC - * When using #MDB_WRITEMAP, use asynchronous flushes to disk. - * As with #MDB_NOSYNC, a system crash can then corrupt the - * database or lose the last transactions. Calling #mdb_env_sync() - * ensures on-disk database integrity until next commit. - * This flag may be changed at any time using #mdb_env_set_flags(). - *
  • #MDB_NOTLS - * Don't use Thread-Local Storage. Tie reader locktable slots to - * #MDB_txn objects instead of to threads. I.e. #mdb_txn_reset() keeps - * the slot reseved for the #MDB_txn object. A thread may use parallel - * read-only transactions. A read-only transaction may span threads if - * the user synchronizes its use. Applications that multiplex many - * user threads over individual OS threads need this option. Such an - * application must also serialize the write transactions in an OS - * thread, since LMDB's write locking is unaware of the user threads. - *
  • #MDB_NOLOCK - * Don't do any locking. If concurrent access is anticipated, the - * caller must manage all concurrency itself. For proper operation - * the caller must enforce single-writer semantics, and must ensure - * that no readers are using old transactions while a writer is - * active. The simplest approach is to use an exclusive lock so that - * no readers may be active at all when a writer begins. - *
  • #MDB_NORDAHEAD - * Turn off readahead. Most operating systems perform readahead on - * read requests by default. This option turns it off if the OS - * supports it. Turning it off may help random read performance - * when the DB is larger than RAM and system RAM is full. - * The option is not implemented on Windows. - *
  • #MDB_NOMEMINIT - * Don't initialize malloc'd memory before writing to unused spaces - * in the data file. By default, memory for pages written to the data - * file is obtained using malloc. While these pages may be reused in - * subsequent transactions, freshly malloc'd pages will be initialized - * to zeroes before use. This avoids persisting leftover data from other - * code (that used the heap and subsequently freed the memory) into the - * data file. Note that many other system libraries may allocate - * and free memory from the heap for arbitrary uses. E.g., stdio may - * use the heap for file I/O buffers. This initialization step has a - * modest performance cost so some applications may want to disable - * it using this flag. This option can be a problem for applications - * which handle sensitive data like passwords, and it makes memory - * checkers like Valgrind noisy. This flag is not needed with #MDB_WRITEMAP, - * which writes directly to the mmap instead of using malloc for pages. The - * initialization is also skipped if #MDB_RESERVE is used; the - * caller is expected to overwrite all of the memory that was - * reserved in that case. - * This flag may be changed at any time using #mdb_env_set_flags(). - *
- * @param[in] mode The UNIX permissions to set on created files. This parameter - * is ignored on Windows. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_VERSION_MISMATCH - the version of the LMDB library doesn't match the - * version that created the database environment. - *
  • #MDB_INVALID - the environment file headers are corrupted. - *
  • ENOENT - the directory specified by the path parameter doesn't exist. - *
  • EACCES - the user didn't have permission to access the environment files. - *
  • EAGAIN - the environment was locked by another process. - *
- */ -int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode); - - /** @brief Copy an LMDB environment to the specified path. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] path The directory in which the copy will reside. This - * directory must already exist and be writable but must otherwise be - * empty. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copy(MDB_env *env, const char *path); - - /** @brief Copy an LMDB environment to the specified file descriptor. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] fd The filedescriptor to write the copy to. It must - * have already been opened for Write access. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copyfd(MDB_env *env, mdb_filehandle_t fd); - - /** @brief Copy an LMDB environment to the specified path, with options. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] path The directory in which the copy will reside. This - * directory must already exist and be writable but must otherwise be - * empty. - * @param[in] flags Special options for this operation. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
    - *
  • #MDB_CP_COMPACT - Perform compaction while copying: omit free - * pages and sequentially renumber all pages in output. This option - * consumes more CPU and runs more slowly than the default. - *
- * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags); - - /** @brief Copy an LMDB environment to the specified file descriptor, - * with options. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated as needed. See - * #mdb_env_copy2() for further details. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] fd The file descriptor to write the copy to. It must - * have already been opened for write access. - * @param[in] flags Special options for this operation. - * See #mdb_env_copy2() for options. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copyfd2(MDB_env *env, mdb_filehandle_t fd, unsigned int flags); - - /** @brief Return statistics about the LMDB environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] stat The address of an #MDB_stat structure - * where the statistics will be copied - */ -int mdb_env_stat(MDB_env *env, MDB_stat *stat); - - /** @brief Return information about the LMDB environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] stat The address of an #MDB_envinfo structure - * where the information will be copied - */ -int mdb_env_info(MDB_env *env, MDB_envinfo *stat); - - /** @brief Flush the data buffers to disk. - * - * Data is always written to disk when #mdb_txn_commit() is called, - * but the operating system may keep it buffered. LMDB always flushes - * the OS buffers upon commit as well, unless the environment was - * opened with #MDB_NOSYNC, or partially with #MDB_NOMETASYNC. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] force If non-zero, force a synchronous flush. Otherwise, - * if the environment has the #MDB_NOSYNC flag set, the flushes - * will be omitted, and with #MDB_MAPASYNC they will be asynchronous. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
  • EIO - an error occurred during synchronization. - *
- */ -int mdb_env_sync(MDB_env *env, int force); - - /** @brief Close the environment and release the memory map. - * - * Only a single thread may call this function. All transactions, databases, - * and cursors must already be closed before calling this function. Attempts to - * use any such handles after calling this function will cause a SIGSEGV. - * The environment handle will be freed and must not be used again after this call. - * @param[in] env An environment handle returned by #mdb_env_create() - */ -void mdb_env_close(MDB_env *env); - - /** @brief Set environment flags. - * - * This may be used to set some flags in addition to those from - * #mdb_env_open(), or to unset these flags. If several threads - * change the flags at the same time, the result is undefined. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] flags The flags to change, bitwise OR'ed together - * @param[in] onoff A non-zero value sets the flags, zero clears them. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
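A sketch of the shutdown sequence mdb_env_sync() and mdb_env_close() above imply (an editor's illustration, not from the original header; the helper name is hypothetical): force one last flush, then close.

/* Flush any buffered writes, then tear the environment down. */
int checkpoint_and_close(MDB_env *env)
{
	int rc = mdb_env_sync(env, 1);  /* force: flush even with MDB_NOSYNC set */
	mdb_env_close(env);             /* all txns, dbs and cursors must be closed already */
	return rc;
}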
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff); - - /** @brief Get environment flags. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] flags The address of an integer to store the flags - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
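One plausible use of mdb_env_set_flags()/mdb_env_get_flags() above, sketched by the editor under the single-threaded flag-change rule the docs state; the bulk-load framing and helper name are illustrative only:

/* Relax durability for a bulk load, then restore it and flush. */
int bulk_load_window(MDB_env *env)
{
	unsigned int flags;
	int rc = mdb_env_set_flags(env, MDB_NOSYNC, 1);   /* onoff=1: set */
	if (rc) return rc;
	/* ... run many small write transactions here ... */
	rc = mdb_env_set_flags(env, MDB_NOSYNC, 0);       /* onoff=0: clear */
	if (rc == 0)
		rc = mdb_env_get_flags(env, &flags);          /* optional: verify */
	return rc ? rc : mdb_env_sync(env, 1);            /* make the load durable */
}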
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_env_get_flags(MDB_env *env, unsigned int *flags); - - /** @brief Return the path that was used in #mdb_env_open(). - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] path Address of a string pointer to contain the path. This - * is the actual string in the environment, not a copy. It should not be - * altered in any way. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_env_get_path(MDB_env *env, const char **path); - - /** @brief Return the file descriptor for the given environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] fd Address of an mdb_filehandle_t to contain the descriptor. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *fd); - - /** @brief Set the size of the memory map to use for this environment. - * - * The size should be a multiple of the OS page size. The default is - * 10485760 bytes. The size of the memory map is also the maximum size - * of the database. The value should be chosen as large as possible, - * to accommodate future growth of the database. - * This function should be called after #mdb_env_create() and before #mdb_env_open(). - * It may be called at later times if no transactions are active in - * this process. Note that the library does not check for this condition; - * the caller must ensure it explicitly. - * - * The new size takes effect immediately for the current process but - * will not be persisted to any other process until a write transaction has been - * committed by the current process. Also, only mapsize increases are - * persisted into the environment. - * - * If the mapsize is increased by another process, and data has grown - * beyond the range of the current mapsize, #mdb_txn_begin() will - * return #MDB_MAP_RESIZED. This function may be called with a size - * of zero to adopt the new size. - * - * Any attempt to set a size smaller than the space already consumed - * by the environment will be silently changed to the current size of the used space. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] size The size in bytes - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified, or the environment has - * an active write transaction. - *
- */ -int mdb_env_set_mapsize(MDB_env *env, size_t size); - - /** @brief Set the maximum number of threads/reader slots for the environment. - * - * This defines the number of slots in the lock table that is used to track readers in the - * environment. The default is 126. - * Starting a read-only transaction normally ties a lock table slot to the - * current thread until the environment closes or the thread exits. If - * MDB_NOTLS is in use, #mdb_txn_begin() instead ties the slot to the - * MDB_txn object until it or the #MDB_env object is destroyed. - * This function may only be called after #mdb_env_create() and before #mdb_env_open(). - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] readers The maximum number of reader lock table slots - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
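An editor's sketch of the resize-adoption pattern mdb_env_set_mapsize() describes above; begin_txn_resilient is a hypothetical helper:

/* Begin a txn, adopting an externally grown map when told to. */
int begin_txn_resilient(MDB_env *env, unsigned int flags, MDB_txn **txn)
{
	int rc = mdb_txn_begin(env, NULL, flags, txn);
	if (rc == MDB_MAP_RESIZED) {
		rc = mdb_env_set_mapsize(env, 0);  /* size 0: adopt the new size */
		if (rc == 0)
			rc = mdb_txn_begin(env, NULL, flags, txn);
	}
	return rc;
}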
    - *
  • EINVAL - an invalid parameter was specified, or the environment is already open. - *
- */ -int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers); - - /** @brief Get the maximum number of threads/reader slots for the environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] readers Address of an integer to store the number of readers - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers); - - /** @brief Set the maximum number of named databases for the environment. - * - * This function is only needed if multiple databases will be used in the - * environment. Simpler applications that use the environment as a single - * unnamed database can ignore this option. - * This function may only be called after #mdb_env_create() and before #mdb_env_open(). - * - * Currently a moderate number of slots are cheap but a huge number gets - * expensive: 7-120 words per transaction, and every #mdb_dbi_open() - * does a linear search of the opened slots. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] dbs The maximum number of databases - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified, or the environment is already open. - *
- */ -int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs); - - /** @brief Get the maximum size of keys and #MDB_DUPSORT data we can write. - * - * Depends on the compile-time constant #MDB_MAXKEYSIZE. Default 511. - * See @ref MDB_val. - * @param[in] env An environment handle returned by #mdb_env_create() - * @return The maximum size of a key we can write - */ -int mdb_env_get_maxkeysize(MDB_env *env); - - /** @brief Set application information associated with the #MDB_env. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] ctx An arbitrary pointer for whatever the application needs. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_set_userctx(MDB_env *env, void *ctx); - - /** @brief Get the application information associated with the #MDB_env. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @return The pointer set by #mdb_env_set_userctx(). - */ -void *mdb_env_get_userctx(MDB_env *env); - - /** @brief A callback function for most LMDB assert() failures, - * called before printing the message and aborting. - * - * @param[in] env An environment handle returned by #mdb_env_create(). - * @param[in] msg The assertion message, not including newline. - */ -typedef void MDB_assert_func(MDB_env *env, const char *msg); - - /** Set or reset the assert() callback of the environment. - * Disabled if liblmdb is built with NDEBUG. - * @note This hack should become obsolete as lmdb's error handling matures. - * @param[in] env An environment handle returned by #mdb_env_create(). - * @param[in] func An #MDB_assert_func function, or 0. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_set_assert(MDB_env *env, MDB_assert_func *func); - - /** @brief Create a transaction for use with the environment. - * - * The transaction handle may be discarded using #mdb_txn_abort() or #mdb_txn_commit(). - * @note A transaction and its cursors must only be used by a single - * thread, and a thread may only have a single transaction at a time. - * If #MDB_NOTLS is in use, this does not apply to read-only transactions. - * @note Cursors may not span transactions. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] parent If this parameter is non-NULL, the new transaction - * will be a nested transaction, with the transaction indicated by \b parent - * as its parent. Transactions may be nested to any level. A parent - * transaction and its cursors may not issue any operations other than - * mdb_txn_commit and mdb_txn_abort while it has active child transactions. - * @param[in] flags Special options for this transaction. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
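Tying the preceding setup functions together, an editor's sketch of the required call order (all tuning between mdb_env_create() and mdb_env_open()); the sizes, path, and helper name are only examples:

/* Configure limits before the environment is opened. */
int env_bootstrap(MDB_env **out)
{
	MDB_env *env;
	int rc = mdb_env_create(&env);
	if (rc) return rc;
	if ((rc = mdb_env_set_mapsize(env, (size_t)1 << 30)) == 0 &&  /* 1 GiB map */
	    (rc = mdb_env_set_maxreaders(env, 256)) == 0 &&
	    (rc = mdb_env_set_maxdbs(env, 16)) == 0)
		rc = mdb_env_open(env, "./testdb", 0, 0664);
	if (rc) { mdb_env_close(env); env = NULL; }
	*out = env;
	return rc;
}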
    - *
  • #MDB_RDONLY - * This transaction will not perform any write operations. - *
- * @param[out] txn Address where the new #MDB_txn handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_PANIC - a fatal error occurred earlier and the environment - * must be shut down. - *
  • #MDB_MAP_RESIZED - another process wrote data beyond this MDB_env's - * mapsize and this environment's map must be resized as well. - * See #mdb_env_set_mapsize(). - *
  • #MDB_READERS_FULL - a read-only transaction was requested and - * the reader lock table is full. See #mdb_env_set_maxreaders(). - *
  • ENOMEM - out of memory. - *
- */ -int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn); - - /** @brief Returns the transaction's #MDB_env - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -MDB_env *mdb_txn_env(MDB_txn *txn); - - /** @brief Commit all the operations of a transaction into the database. - * - * The transaction handle is freed. It and its cursors must not be used - * again after this call, except with #mdb_cursor_renew(). - * @note Earlier documentation incorrectly said all cursors would be freed. - * Only write-transactions free cursors. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
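An editor's sketch of the commit-or-abort discipline mdb_txn_begin() and mdb_txn_commit() call for, so the handle is freed exactly once; with_write_txn is a hypothetical helper:

/* Wrap one write in a transaction; commit on success, abort on error. */
int with_write_txn(MDB_env *env, MDB_dbi dbi, MDB_val *key, MDB_val *data)
{
	MDB_txn *txn;
	int rc = mdb_txn_begin(env, NULL, 0, &txn);
	if (rc) return rc;
	rc = mdb_put(txn, dbi, key, data, 0);
	if (rc) {
		mdb_txn_abort(txn);       /* frees the handle */
		return rc;
	}
	return mdb_txn_commit(txn);   /* commit also frees the handle */
}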
    - *
  • EINVAL - an invalid parameter was specified. - *
  • ENOSPC - no more disk space. - *
  • EIO - a low-level I/O error occurred while writing. - *
  • ENOMEM - out of memory. - *
- */ -int mdb_txn_commit(MDB_txn *txn); - - /** @brief Abandon all the operations of the transaction instead of saving them. - * - * The transaction handle is freed. It and its cursors must not be used - * again after this call, except with #mdb_cursor_renew(). - * @note Earlier documentation incorrectly said all cursors would be freed. - * Only write-transactions free cursors. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -void mdb_txn_abort(MDB_txn *txn); - - /** @brief Reset a read-only transaction. - * - * Abort the transaction like #mdb_txn_abort(), but keep the transaction - * handle. #mdb_txn_renew() may reuse the handle. This saves allocation - * overhead if the process will start a new read-only transaction soon, - * and also locking overhead if #MDB_NOTLS is in use. The reader table - * lock is released, but the table slot stays tied to its thread or - * #MDB_txn. Use mdb_txn_abort() to discard a reset handle, and to free - * its lock table slot if MDB_NOTLS is in use. - * Cursors opened within the transaction must not be used - * again after this call, except with #mdb_cursor_renew(). - * Reader locks generally don't interfere with writers, but they keep old - * versions of database pages allocated. Thus they prevent the old pages - * from being reused when writers commit new data, and so under heavy load - * the database size may grow much more rapidly than otherwise. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -void mdb_txn_reset(MDB_txn *txn); - - /** @brief Renew a read-only transaction. - * - * This acquires a new reader lock for a transaction handle that had been - * released by #mdb_txn_reset(). It must be called before a reset transaction - * may be used again. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_PANIC - a fatal error occurred earlier and the environment - * must be shut down. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_txn_renew(MDB_txn *txn); - -/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ -#define mdb_open(txn,name,flags,dbi) mdb_dbi_open(txn,name,flags,dbi) -/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ -#define mdb_close(env,dbi) mdb_dbi_close(env,dbi) - - /** @brief Open a database in the environment. - * - * A database handle denotes the name and parameters of a database, - * independently of whether such a database exists. - * The database handle may be discarded by calling #mdb_dbi_close(). - * The old database handle is returned if the database was already open. - * The handle may only be closed once. - * The database handle will be private to the current transaction until - * the transaction is successfully committed. If the transaction is - * aborted the handle will be closed automatically. - * After a successful commit the - * handle will reside in the shared environment, and may be used - * by other transactions. This function must not be called from - * multiple concurrent transactions. A transaction that uses this function - * must finish (either commit or abort) before any other transaction may - * use this function. - * - * To use named databases (with name != NULL), #mdb_env_set_maxdbs() - * must be called before opening the environment. Database names - * are kept as keys in the unnamed database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] name The name of the database to open. If only a single - * database is needed in the environment, this value may be NULL. - * @param[in] flags Special options for this database. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
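An editor's sketch of the reset/renew recycling pattern described above for read-only transactions; the loop bound and helper name are arbitrary:

/* Reuse one read-only transaction handle across many lookups. */
void reader_loop(MDB_env *env, MDB_dbi dbi, MDB_val *key)
{
	MDB_txn *txn;
	MDB_val data;
	int i;
	if (mdb_txn_begin(env, NULL, MDB_RDONLY, &txn))
		return;
	for (i = 0; i < 100; i++) {
		if (mdb_get(txn, dbi, key, &data) == 0) {
			/* ... use data before the reset below invalidates it ... */
		}
		mdb_txn_reset(txn);       /* release the read lock, keep the handle */
		if (mdb_txn_renew(txn))   /* reacquire it for the next pass */
			break;
	}
	mdb_txn_abort(txn);           /* finally free the handle */
}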
    - *
  • #MDB_REVERSEKEY - * Keys are strings to be compared in reverse order, from the end - * of the strings to the beginning. By default, keys are treated as strings and - * compared from beginning to end. - *
  • #MDB_DUPSORT - * Duplicate keys may be used in the database. (Or, from another perspective, - * keys may have multiple data items, stored in sorted order.) By default - * keys must be unique and may have only a single data item. - *
  • #MDB_INTEGERKEY - * Keys are binary integers in native byte order. Setting this option - * requires all keys to be the same size, typically sizeof(int) - * or sizeof(size_t). - *
  • #MDB_DUPFIXED - * This flag may only be used in combination with #MDB_DUPSORT. This option - * tells the library that the data items for this database are all the same - * size, which allows further optimizations in storage and retrieval. When - * all data items are the same size, the #MDB_GET_MULTIPLE and #MDB_NEXT_MULTIPLE - * cursor operations may be used to retrieve multiple items at once. - *
  • #MDB_INTEGERDUP - * This option specifies that duplicate data items are also integers, and - * should be sorted as such. - *
  • #MDB_REVERSEDUP - * This option specifies that duplicate data items should be compared as - * strings in reverse order. - *
  • #MDB_CREATE - * Create the named database if it doesn't exist. This option is not - * allowed in a read-only transaction or a read-only environment. - *
- * @param[out] dbi Address where the new #MDB_dbi handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_NOTFOUND - the specified database doesn't exist in the environment - * and #MDB_CREATE was not specified. - *
  • #MDB_DBS_FULL - too many databases have been opened. See #mdb_env_set_maxdbs(). - *
- */ -int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi); - - /** @brief Retrieve statistics for a database. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] stat The address of an #MDB_stat structure - * where the statistics will be copied - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
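An editor's sketch of opening a named database under the rules above (the handle is published to other transactions only on commit); the database name "widgets" is illustrative:

/* Open (or create) a named database and publish its handle. */
int open_named_db(MDB_env *env, MDB_dbi *dbi)
{
	MDB_txn *txn;
	int rc = mdb_txn_begin(env, NULL, 0, &txn);
	if (rc) return rc;
	rc = mdb_dbi_open(txn, "widgets", MDB_CREATE, dbi);
	if (rc) {
		mdb_txn_abort(txn);
		return rc;
	}
	return mdb_txn_commit(txn);  /* after this, other txns may use *dbi */
}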
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); - - /** @brief Retrieve the DB flags for a database handle. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] flags Address where the flags will be returned. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags); - - /** @brief Close a database handle. Normally unnecessary. Use with care: - * - * This call is not mutex protected. Handles should only be closed by - * a single thread, and only if no other threads are going to reference - * the database handle or one of its cursors any further. Do not close - * a handle if an existing transaction has modified its database. - * Doing so can cause misbehavior ranging from database corruption to errors - * like MDB_BAD_VALSIZE (since the DB name is gone). - * - * Closing a database handle is not necessary, but lets #mdb_dbi_open() - * reuse the handle value. Usually it's better to set a bigger limit - * with #mdb_env_set_maxdbs(), unless that limit would be very large. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - */ -void mdb_dbi_close(MDB_env *env, MDB_dbi dbi); - - /** @brief Empty or delete+close a database. - * - * See #mdb_dbi_close() for restrictions about closing the DB handle. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] del 0 to empty the DB, 1 to delete it from the - * environment and close the DB handle. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del); - - /** @brief Set a custom key comparison function for a database. - * - * The comparison function is called whenever it is necessary to compare a - * key specified by the application with a key currently stored in the database. - * If no comparison function is specified, and no special key flags were specified - * with #mdb_dbi_open(), the keys are compared lexically, with shorter keys collating - * before longer keys. - * @warning This function must be called before any data access functions are used; - * otherwise data corruption may occur. The same comparison function must be used by every - * program accessing the database, every time the database is used. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] cmp A #MDB_cmp_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); - - /** @brief Set a custom data comparison function for a #MDB_DUPSORT database. - * - * This comparison function is called whenever it is necessary to compare a data - * item specified by the application with a data item currently stored in the database. - * This function only takes effect if the database was opened with the #MDB_DUPSORT - * flag. - * If no comparison function is specified, and no special key flags were specified - * with #mdb_dbi_open(), the data items are compared lexically, with shorter items collating - * before longer items. - * @warning This function must be called before any data access functions are used; - * otherwise data corruption may occur. The same comparison function must be used by every - * program accessing the database, every time the database is used. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] cmp A #MDB_cmp_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); - - /** @brief Set a relocation function for a #MDB_FIXEDMAP database. - * - * @todo The relocation function is called whenever it is necessary to move the data - * of an item to a different position in the database (e.g. through tree - * balancing operations, shifts as a result of adds or deletes, etc.). It is - * intended to allow address/position-dependent data items to be stored in - * a database in an environment opened with the #MDB_FIXEDMAP option. - * Currently the relocation feature is unimplemented and setting - * this function has no effect. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] rel A #MDB_rel_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel); - - /** @brief Set a context pointer for a #MDB_FIXEDMAP database's relocation function. - * - * See #mdb_set_relfunc and #MDB_rel_func for more details. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] ctx An arbitrary pointer for whatever the application needs. - * It will be passed to the callback function set by #mdb_set_relfunc - * as its \b relctx parameter whenever the callback is invoked. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx); - - /** @brief Get items from a database. - * - * This function retrieves key/data pairs from the database. The address - * and length of the data associated with the specified \b key are returned - * in the structure to which \b data refers. - * If the database supports duplicate keys (#MDB_DUPSORT) then the - * first data item for the key will be returned. Retrieval of other - * items requires the use of #mdb_cursor_get(). - * - * @note The memory pointed to by the returned values is owned by the - * database. The caller need not dispose of the memory, and may not - * modify it in any way. For values returned in a read-only transaction - * any modification attempts will cause a SIGSEGV. - * @note Values returned from the database are valid only until a - * subsequent update operation, or the end of the transaction. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to search for in the database - * @param[out] data The data corresponding to the key - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_NOTFOUND - the key was not in the database. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); - - /** @brief Store items into a database. - * - * This function stores key/data pairs in the database. The default behavior - * is to enter the new key/data pair, replacing any previously existing key - * if duplicates are disallowed, or adding a duplicate data item if - * duplicates are allowed (#MDB_DUPSORT). - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to store in the database - * @param[in,out] data The data to store - * @param[in] flags Special options for this operation. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
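A small editor's sketch of mdb_get() with a C string key; lookup_str is hypothetical and <string.h> is assumed for strlen():

#include <string.h>

/* Look up a NUL-terminated key; *data is only valid until the txn ends. */
int lookup_str(MDB_txn *txn, MDB_dbi dbi, const char *name, MDB_val *data)
{
	MDB_val key;
	key.mv_size = strlen(name);
	key.mv_data = (void *)name;
	return mdb_get(txn, dbi, &key, data);  /* MDB_NOTFOUND means a plain miss */
}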
    - *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not - * already appear in the database. This flag may only be specified - * if the database was opened with #MDB_DUPSORT. The function will - * return #MDB_KEYEXIST if the key/data pair already appears in the - * database. - *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key - * does not already appear in the database. The function will return - * #MDB_KEYEXIST if the key already appears in the database, even if - * the database supports duplicates (#MDB_DUPSORT). The \b data - * parameter will be set to point to the existing item. - *
  • #MDB_RESERVE - reserve space for data of the given size, but - * don't copy the given data. Instead, return a pointer to the - * reserved space, which the caller can fill in later - before - * the next update operation or the transaction ends. This saves - * an extra memcpy if the data is being generated later. - * LMDB does nothing else with this memory; the caller is expected - * to modify all of the space requested. - *
  • #MDB_APPEND - append the given key/data pair to the end of the - * database. No key comparisons are performed. This option allows - * fast bulk loading when keys are already known to be in the - * correct order. Loading unsorted keys with this flag will cause - * data corruption. - *
  • #MDB_APPENDDUP - as above, but for sorted dup data. - *
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). - *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. - *
  • EACCES - an attempt was made to write in a read-only transaction. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_put(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, - unsigned int flags); - - /** @brief Delete items from a database. - * - * This function removes key/data pairs from the database. - * If the database does not support sorted duplicate data items - * (#MDB_DUPSORT) the data parameter is ignored. - * If the database supports sorted duplicates and the data parameter - * is NULL, all of the duplicate data items for the key will be - * deleted. Otherwise, if the data parameter is non-NULL - * only the matching data item will be deleted. - * This function will return #MDB_NOTFOUND if the specified key/data - * pair is not in the database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to delete from the database - * @param[in] data The data to delete - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
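An editor's sketch of insert-if-absent using the MDB_NOOVERWRITE semantics documented above (on MDB_KEYEXIST, \b data is repointed at the stored item); the helper name is hypothetical:

/* Insert only when the key is new; report whether we inserted. */
int insert_if_absent(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, int *inserted)
{
	int rc = mdb_put(txn, dbi, key, data, MDB_NOOVERWRITE);
	*inserted = (rc == 0);
	if (rc == MDB_KEYEXIST)
		return 0;   /* data now points at the existing value */
	return rc;
}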
    - *
  • EACCES - an attempt was made to write in a read-only transaction. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_del(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); - - /** @brief Create a cursor handle. - * - * A cursor is associated with a specific transaction and database. - * A cursor cannot be used when its database handle is closed, nor - * when its transaction has ended, except with #mdb_cursor_renew(). - * It can be discarded with #mdb_cursor_close(). - * A cursor in a write-transaction can be closed before its transaction - * ends, and will otherwise be closed when its transaction ends. - * A cursor in a read-only transaction must be closed explicitly, before - * or after its transaction ends. It can be reused with - * #mdb_cursor_renew() before finally closing it. - * @note Earlier documentation said that cursors in every transaction - * were closed when the transaction committed or aborted. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] cursor Address where the new #MDB_cursor handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor); - - /** @brief Close a cursor handle. - * - * The cursor handle will be freed and must not be used again after this call. - * Its transaction must still be live if it is a write-transaction. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -void mdb_cursor_close(MDB_cursor *cursor); - - /** @brief Renew a cursor handle. - * - * A cursor is associated with a specific transaction and database. - * Cursors that are only used in read-only - * transactions may be re-used, to avoid unnecessary malloc/free overhead. - * The cursor may be associated with a new read-only transaction that - * references the same database handle it was created with. - * This may be done whether the previous transaction is live or dead. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_renew(MDB_txn *txn, MDB_cursor *cursor); - - /** @brief Return the cursor's transaction handle. - * - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -MDB_txn *mdb_cursor_txn(MDB_cursor *cursor); - - /** @brief Return the cursor's database handle. - * - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -MDB_dbi mdb_cursor_dbi(MDB_cursor *cursor); - - /** @brief Retrieve by cursor. - * - * This function retrieves key/data pairs from the database. The address and length - * of the key are returned in the object to which \b key refers (except for the - * case of the #MDB_SET option, in which the \b key object is unchanged), and - * the address and length of the data are returned in the object to which \b data - * refers. - * See #mdb_get() for restrictions on using the output values. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in,out] key The key for a retrieved item - * @param[in,out] data The data of a retrieved item - * @param[in] op A cursor operation #MDB_cursor_op - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_NOTFOUND - no matching key found. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val *data, - MDB_cursor_op op); - - /** @brief Store by cursor. - * - * This function stores key/data pairs into the database. - * The cursor is positioned at the new item, or on failure usually near it. - * @note Earlier documentation incorrectly said errors would leave the - * state of the cursor unchanged. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in] key The key operated on. - * @param[in] data The data operated on. - * @param[in] flags Options for this operation. This parameter - * must be set to 0 or one of the values described here. - *
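An editor's sketch of a full scan with mdb_cursor_get(), treating MDB_NOTFOUND as the normal end-of-database signal; scan_all is a hypothetical helper:

/* Iterate every key/data pair in dbi within txn. */
int scan_all(MDB_txn *txn, MDB_dbi dbi)
{
	MDB_cursor *cur;
	MDB_val key, data;
	int rc = mdb_cursor_open(txn, dbi, &cur);
	if (rc) return rc;
	for (rc = mdb_cursor_get(cur, &key, &data, MDB_FIRST);
	     rc == 0;
	     rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) {
		/* key/data point into the map and stay valid until the txn ends */
	}
	mdb_cursor_close(cur);
	return rc == MDB_NOTFOUND ? 0 : rc;  /* NOTFOUND just means "done" */
}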
    - *
  • #MDB_CURRENT - replace the item at the current cursor position. - * The \b key parameter must still be provided, and must match the - * key at the current position. - * If using sorted duplicates (#MDB_DUPSORT) the data item must still - * sort into the same place. This is intended to be used when the - * new data is the same size as the old. Otherwise it will simply - * perform a delete of the old record followed by an insert. - *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not - * already appear in the database. This flag may only be specified - * if the database was opened with #MDB_DUPSORT. The function will - * return #MDB_KEYEXIST if the key/data pair already appears in the - * database. - *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key - * does not already appear in the database. The function will return - * #MDB_KEYEXIST if the key already appears in the database, even if - * the database supports duplicates (#MDB_DUPSORT). - *
  • #MDB_RESERVE - reserve space for data of the given size, but - * don't copy the given data. Instead, return a pointer to the - * reserved space, which the caller can fill in later. This saves - * an extra memcpy if the data is being generated later. - *
  • #MDB_APPEND - append the given key/data pair to the end of the - * database. No key comparisons are performed. This option allows - * fast bulk loading when keys are already known to be in the - * correct order. Loading unsorted keys with this flag will cause - * data corruption. - *
  • #MDB_APPENDDUP - as above, but for sorted dup data. - *
  • #MDB_MULTIPLE - store multiple contiguous data elements in a - * single request. This flag may only be specified if the database - * was opened with #MDB_DUPFIXED. The \b data argument must be an - * array of two MDB_vals. The mv_size of the first MDB_val must be - * the size of a single data element. The mv_data of the first MDB_val - * must point to the beginning of the array of contiguous data elements. - * The mv_size of the second MDB_val must be the number - * of data elements to store. On return this field will be set to - * the number of elements actually written. The mv_data - * of the second MDB_val is unused. - *
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). - *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. - *
  • EACCES - an attempt was made to modify a read-only database. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_put(MDB_cursor *cursor, MDB_val *key, MDB_val *data, - unsigned int flags); - - /** @brief Delete current key/data pair - * - * This function deletes the key/data pair to which the cursor refers. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in] flags Options for this operation. This parameter - * must be set to 0 or one of the values described here. - *
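An editor's sketch of bulk loading with MDB_APPEND via mdb_cursor_put(); as the flag description above warns, the keys must already be sorted, and the helper name is hypothetical:

/* Append presorted pairs; skips key comparisons entirely. */
int bulk_append(MDB_txn *txn, MDB_dbi dbi, MDB_val *keys, MDB_val *vals, size_t n)
{
	MDB_cursor *cur;
	size_t i;
	int rc = mdb_cursor_open(txn, dbi, &cur);
	if (rc) return rc;
	for (i = 0; i < n && rc == 0; i++)
		rc = mdb_cursor_put(cur, &keys[i], &vals[i], MDB_APPEND);
	mdb_cursor_close(cur);
	return rc;
}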
    - *
  • #MDB_NODUPDATA - delete all of the data items for the current key. - * This flag may only be specified if the database was opened with #MDB_DUPSORT. - *
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EACCES - an attempt was made to modify a read-only database. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags); - - /** @brief Return the count of duplicates for the current key. - * - * This call is only valid on databases that support sorted duplicate - * data items (#MDB_DUPSORT). - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[out] countp Address where the count will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - cursor is not initialized, or an invalid parameter was specified. - *
- */ -int mdb_cursor_count(MDB_cursor *cursor, size_t *countp); - - /** @brief Compare two data items according to a particular database. - * - * This returns a comparison as if the two data items were keys in the - * specified database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] a The first item to compare - * @param[in] b The second item to compare - * @return < 0 if a < b, 0 if a == b, > 0 if a > b - */ -int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); - - /** @brief Compare two data items according to a particular database. - * - * This returns a comparison as if the two items were data items of - * the specified database. The database must have the #MDB_DUPSORT flag. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] a The first item to compare - * @param[in] b The second item to compare - * @return < 0 if a < b, 0 if a == b, > 0 if a > b - */ -int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); - - /** @brief A callback function used to print a message from the library. - * - * @param[in] msg The string to be printed. - * @param[in] ctx An arbitrary context pointer for the callback. - * @return < 0 on failure, >= 0 on success. - */ -typedef int (MDB_msg_func)(const char *msg, void *ctx); - - /** @brief Dump the entries in the reader lock table. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] func A #MDB_msg_func function - * @param[in] ctx Anything the message function needs - * @return < 0 on failure, >= 0 on success. - */ -int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); - - /** @brief Check for stale entries in the reader lock table. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] dead Number of stale slots that were cleared - * @return 0 on success, non-zero on failure. - */ -int mdb_reader_check(MDB_env *env, int *dead); -/** @} */ - -#ifdef __cplusplus -} -#endif -/** @page tools LMDB Command Line Tools - The following describes the command line tools that are available for LMDB. - \li \ref mdb_copy_1 - \li \ref mdb_dump_1 - \li \ref mdb_load_1 - \li \ref mdb_stat_1 -*/ - -#endif /* _LMDB_H_ */ diff --git a/vendor/gomdb/mdb.c b/vendor/gomdb/mdb.c deleted file mode 100644 index 4f3bec3..0000000 --- a/vendor/gomdb/mdb.c +++ /dev/null @@ -1,9366 +0,0 @@ -// +build lmdb - -/** @file mdb.c - * @brief Lightning memory-mapped database library - * - * A Btree-based database management library modeled loosely on the - * BerkeleyDB API, but much simplified. - */ -/* - * Copyright 2011-2014 Howard Chu, Symas Corp. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - * - * This code is derived from btree.c written by Martin Hedenfalk. - * - * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE 1 -#endif -#ifdef _WIN32 -#include <malloc.h> -#include <windows.h> -/** getpid() returns int; MinGW defines pid_t but MinGW64 typedefs it - * as int64 which is wrong. MSVC doesn't define it at all, so just - * don't use it. - */ -#define MDB_PID_T int -#define MDB_THR_T DWORD -#include <sys/types.h> -#include <sys/stat.h> -#ifdef __GNUC__ -# include <sys/param.h> -#else -# define LITTLE_ENDIAN 1234 -# define BIG_ENDIAN 4321 -# define BYTE_ORDER LITTLE_ENDIAN -# ifndef SSIZE_MAX -# define SSIZE_MAX INT_MAX -# endif -#endif -#else -#include <sys/types.h> -#include <sys/stat.h> -#define MDB_PID_T pid_t -#define MDB_THR_T pthread_t -#include <sys/param.h> -#include <sys/uio.h> -#include <sys/mman.h> -#ifdef HAVE_SYS_FILE_H -#include <sys/file.h> -#endif -#include <fcntl.h> -#endif - -#if defined(__mips) && defined(__linux) -/* MIPS has cache coherency issues, requires explicit cache control */ -#include <asm/cachectl.h> -extern int cacheflush(char *addr, int nbytes, int cache); -#define CACHEFLUSH(addr, bytes, cache) cacheflush(addr, bytes, cache) -#else -#define CACHEFLUSH(addr, bytes, cache) -#endif - - -#include <errno.h> -#include <limits.h> -#include <stddef.h> -#include <inttypes.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <time.h> -#include <unistd.h> - -#if defined(__sun) -/* Most platforms have posix_memalign, older may only have memalign */ -#define HAVE_MEMALIGN 1 -#include <malloc.h> -#endif - -#if !(defined(BYTE_ORDER) || defined(__BYTE_ORDER)) -#include <netinet/in.h> -#include <resolv.h> /* defines BYTE_ORDER on HPUX and Solaris */ -#endif - -#if defined(__APPLE__) || defined (BSD) -# define MDB_USE_POSIX_SEM 1 -# define MDB_FDATASYNC fsync -#elif defined(ANDROID) -# define MDB_FDATASYNC fsync -#endif - -#ifndef _WIN32 -#include <pthread.h> -#ifdef MDB_USE_POSIX_SEM -# define MDB_USE_HASH 1 -#include <semaphore.h> -#endif -#endif - -#ifdef USE_VALGRIND -#include <valgrind/memcheck.h> -#define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z) -#define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s) -#define VGMEMP_FREE(h,a) VALGRIND_MEMPOOL_FREE(h,a) -#define VGMEMP_DESTROY(h) VALGRIND_DESTROY_MEMPOOL(h) -#define VGMEMP_DEFINED(a,s) VALGRIND_MAKE_MEM_DEFINED(a,s) -#else -#define VGMEMP_CREATE(h,r,z) -#define VGMEMP_ALLOC(h,a,s) -#define VGMEMP_FREE(h,a) -#define VGMEMP_DESTROY(h) -#define VGMEMP_DEFINED(a,s) -#endif - -#ifndef BYTE_ORDER -# if (defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)) -/* Solaris just defines one or the other */ -# define LITTLE_ENDIAN 1234 -# define BIG_ENDIAN 4321 -# ifdef _LITTLE_ENDIAN -# define BYTE_ORDER LITTLE_ENDIAN -# else -# define BYTE_ORDER BIG_ENDIAN -# endif -# else -# define BYTE_ORDER __BYTE_ORDER -# endif -#endif - -#ifndef LITTLE_ENDIAN -#define LITTLE_ENDIAN __LITTLE_ENDIAN -#endif -#ifndef BIG_ENDIAN -#define BIG_ENDIAN __BIG_ENDIAN -#endif - -#if defined(__i386) || defined(__x86_64) || defined(_M_IX86) -#define MISALIGNED_OK 1 -#endif - -#include "lmdb.h" -#include "midl.h" - -#if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN) -# error "Unknown or unsupported endianness (BYTE_ORDER)" -#elif (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF -# error "Two's complement, reasonably sized integer types, 
please" -#endif - -#ifdef __GNUC__ -/** Put infrequently used env functions in separate section */ -# ifdef __APPLE__ -# define ESECT __attribute__ ((section("__TEXT,text_env"))) -# else -# define ESECT __attribute__ ((section("text_env"))) -# endif -#else -#define ESECT -#endif - -/** @defgroup internal LMDB Internals - * @{ - */ -/** @defgroup compat Compatibility Macros - * A bunch of macros to minimize the amount of platform-specific ifdefs - * needed throughout the rest of the code. When the features this library - * needs are similar enough to POSIX to be hidden in a one-or-two line - * replacement, this macro approach is used. - * @{ - */ - - /** Features under development */ -#ifndef MDB_DEVEL -#define MDB_DEVEL 0 -#endif - - /** Wrapper around __func__, which is a C99 feature */ -#if __STDC_VERSION__ >= 199901L -# define mdb_func_ __func__ -#elif __GNUC__ >= 2 || _MSC_VER >= 1300 -# define mdb_func_ __FUNCTION__ -#else -/* If a debug message says (), update the #if statements above */ -# define mdb_func_ "" -#endif - -#ifdef _WIN32 -#define MDB_USE_HASH 1 -#define MDB_PIDLOCK 0 -#define THREAD_RET DWORD -#define pthread_t HANDLE -#define pthread_mutex_t HANDLE -#define pthread_cond_t HANDLE -#define pthread_key_t DWORD -#define pthread_self() GetCurrentThreadId() -#define pthread_key_create(x,y) \ - ((*(x) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? ErrCode() : 0) -#define pthread_key_delete(x) TlsFree(x) -#define pthread_getspecific(x) TlsGetValue(x) -#define pthread_setspecific(x,y) (TlsSetValue(x,y) ? 0 : ErrCode()) -#define pthread_mutex_unlock(x) ReleaseMutex(*x) -#define pthread_mutex_lock(x) WaitForSingleObject(*x, INFINITE) -#define pthread_cond_signal(x) SetEvent(*x) -#define pthread_cond_wait(cond,mutex) do{SignalObjectAndWait(*mutex, *cond, INFINITE, FALSE); WaitForSingleObject(*mutex, INFINITE);}while(0) -#define THREAD_CREATE(thr,start,arg) thr=CreateThread(NULL,0,start,arg,0,NULL) -#define THREAD_FINISH(thr) WaitForSingleObject(thr, INFINITE) -#define LOCK_MUTEX_R(env) pthread_mutex_lock(&(env)->me_rmutex) -#define UNLOCK_MUTEX_R(env) pthread_mutex_unlock(&(env)->me_rmutex) -#define LOCK_MUTEX_W(env) pthread_mutex_lock(&(env)->me_wmutex) -#define UNLOCK_MUTEX_W(env) pthread_mutex_unlock(&(env)->me_wmutex) -#define getpid() GetCurrentProcessId() -#define MDB_FDATASYNC(fd) (!FlushFileBuffers(fd)) -#define MDB_MSYNC(addr,len,flags) (!FlushViewOfFile(addr,len)) -#define ErrCode() GetLastError() -#define GET_PAGESIZE(x) {SYSTEM_INFO si; GetSystemInfo(&si); (x) = si.dwPageSize;} -#define close(fd) (CloseHandle(fd) ? 
0 : -1) -#define munmap(ptr,len) UnmapViewOfFile(ptr) -#ifdef PROCESS_QUERY_LIMITED_INFORMATION -#define MDB_PROCESS_QUERY_LIMITED_INFORMATION PROCESS_QUERY_LIMITED_INFORMATION -#else -#define MDB_PROCESS_QUERY_LIMITED_INFORMATION 0x1000 -#endif -#define Z "I" -#else -#define THREAD_RET void * -#define THREAD_CREATE(thr,start,arg) pthread_create(&thr,NULL,start,arg) -#define THREAD_FINISH(thr) pthread_join(thr,NULL) -#define Z "z" /**< printf format modifier for size_t */ - - /** For MDB_LOCK_FORMAT: True if readers take a pid lock in the lockfile */ -#define MDB_PIDLOCK 1 - -#ifdef MDB_USE_POSIX_SEM - -#define LOCK_MUTEX_R(env) mdb_sem_wait((env)->me_rmutex) -#define UNLOCK_MUTEX_R(env) sem_post((env)->me_rmutex) -#define LOCK_MUTEX_W(env) mdb_sem_wait((env)->me_wmutex) -#define UNLOCK_MUTEX_W(env) sem_post((env)->me_wmutex) - -static int -mdb_sem_wait(sem_t *sem) -{ - int rc; - while ((rc = sem_wait(sem)) && (rc = errno) == EINTR) ; - return rc; -} - -#else - /** Lock the reader mutex. - */ -#define LOCK_MUTEX_R(env) pthread_mutex_lock(&(env)->me_txns->mti_mutex) - /** Unlock the reader mutex. - */ -#define UNLOCK_MUTEX_R(env) pthread_mutex_unlock(&(env)->me_txns->mti_mutex) - - /** Lock the writer mutex. - * Only a single write transaction is allowed at a time. Other writers - * will block waiting for this mutex. - */ -#define LOCK_MUTEX_W(env) pthread_mutex_lock(&(env)->me_txns->mti_wmutex) - /** Unlock the writer mutex. - */ -#define UNLOCK_MUTEX_W(env) pthread_mutex_unlock(&(env)->me_txns->mti_wmutex) -#endif /* MDB_USE_POSIX_SEM */ - - /** Get the error code for the last failed system function. - */ -#define ErrCode() errno - - /** An abstraction for a file handle. - * On POSIX systems file handles are small integers. On Windows - * they're opaque pointers. - */ -#define HANDLE int - - /** A value for an invalid file handle. - * Mainly used to initialize file variables and signify that they are - * unused. - */ -#define INVALID_HANDLE_VALUE (-1) - - /** Get the size of a memory page for the system. - * This is the basic size that the platform's memory manager uses, and is - * fundamental to the use of memory-mapped files. - */ -#define GET_PAGESIZE(x) ((x) = sysconf(_SC_PAGE_SIZE)) -#endif - -#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) -#define MNAME_LEN 32 -#else -#define MNAME_LEN (sizeof(pthread_mutex_t)) -#endif - -/** @} */ - -#ifndef _WIN32 -/** A flag for opening a file and requesting synchronous data writes. - * This is only used when writing a meta page. It's not strictly needed; - * we could just do a normal write and then immediately perform a flush. - * But if this flag is available it saves us an extra system call. - * - * @note If O_DSYNC is undefined but exists in /usr/include, - * preferably set some compiler flag to get the definition. - * Otherwise compile with the less efficient -DMDB_DSYNC=O_SYNC. - */ -#ifndef MDB_DSYNC -# define MDB_DSYNC O_DSYNC -#endif -#endif - -/** Function for flushing the data of a file. Define this to fsync - * if fdatasync() is not supported. - */ -#ifndef MDB_FDATASYNC -# define MDB_FDATASYNC fdatasync -#endif - -#ifndef MDB_MSYNC -# define MDB_MSYNC(addr,len,flags) msync(addr,len,flags) -#endif - -#ifndef MS_SYNC -#define MS_SYNC 1 -#endif - -#ifndef MS_ASYNC -#define MS_ASYNC 0 -#endif - - /** A page number in the database. 
- * Note that 64 bit page numbers are overkill, since pages themselves - * already represent 12-13 bits of addressable memory, and the OS will - * always limit applications to a maximum of 63 bits of address space. - * - * @note In the #MDB_node structure, we only store 48 bits of this value, - * which thus limits us to only 60 bits of addressable data. - */ -typedef MDB_ID pgno_t; - - /** A transaction ID. - * See struct MDB_txn.mt_txnid for details. - */ -typedef MDB_ID txnid_t; - -/** @defgroup debug Debug Macros - * @{ - */ -#ifndef MDB_DEBUG - /** Enable debug output. Needs variable argument macros (a C99 feature). - * Set this to 1 for copious tracing. Set to 2 to add dumps of all IDLs - * read from and written to the database (used for free space management). - */ -#define MDB_DEBUG 0 -#endif - -#if MDB_DEBUG -static int mdb_debug; -static txnid_t mdb_debug_start; - - /** Print a debug message with printf formatting. - * Requires double parentheses around 2 or more args. - */ -# define DPRINTF(args) ((void) ((mdb_debug) && DPRINTF0 args)) -# define DPRINTF0(fmt, ...) \ - fprintf(stderr, "%s:%d " fmt "\n", mdb_func_, __LINE__, __VA_ARGS__) -#else -# define DPRINTF(args) ((void) 0) -#endif - /** Print a debug string. - * The string is printed literally, with no format processing. - */ -#define DPUTS(arg) DPRINTF(("%s", arg)) - /** Debugging output value of a cursor DBI: Negative in a sub-cursor. */ -#define DDBI(mc) \ - (((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi) -/** @} */ - - /** @brief The maximum size of a database page. - * - * It is 32k or 64k, since value-PAGEBASE must fit in - * #MDB_page.%mp_upper. - * - * LMDB will use database pages < OS pages if needed. - * That causes more I/O in write transactions: The OS must - * know (read) the whole page before writing a partial page. - * - * Note that we don't currently support Huge pages. On Linux, - * regular data files cannot use Huge pages, and in general - * Huge pages aren't actually pageable. We rely on the OS - * demand-pager to read our data and page it out when memory - * pressure from other processes is high. So until OSs have - * actual paging support for Huge pages, they're not viable. - */ -#define MAX_PAGESIZE (PAGEBASE ? 0x10000 : 0x8000) - - /** The minimum number of keys required in a database page. - * Setting this to a larger value will place a smaller bound on the - * maximum size of a data item. Data items larger than this size will - * be pushed into overflow pages instead of being stored directly in - * the B-tree node. This value used to default to 4. With a page size - * of 4096 bytes that meant that any item larger than 1024 bytes would - * go into an overflow page. That also meant that on average 2-3KB of - * each overflow page was wasted space. The value cannot be lower than - * 2 because then there would no longer be a tree structure. With this - * value, items larger than 2KB will go into overflow pages, and on - * average only 1KB will be wasted. - */ -#define MDB_MINKEYS 2 - - /** A stamp that identifies a file as an LMDB file. - * There's nothing special about this value other than that it is easily - * recognizable, and it will reflect any byte order mismatches. - */ -#define MDB_MAGIC 0xBEEFC0DE - - /** The version number for a database's datafile format. */ -#define MDB_DATA_VERSION ((MDB_DEVEL) ? 999 : 1) - /** The version number for a database's lockfile format. */ -#define MDB_LOCK_VERSION 1 - - /** @brief The max size of a key we can write, or 0 for dynamic max. 
- * - * Define this as 0 to compute the max from the page size. 511 - * is default for backwards compat: liblmdb <= 0.9.10 can break - * when modifying a DB with keys/dupsort data bigger than its max. - * #MDB_DEVEL sets the default to 0. - * - * Data items in an #MDB_DUPSORT database are also limited to - * this size, since they're actually keys of a sub-DB. Keys and - * #MDB_DUPSORT data items must fit on a node in a regular page. - */ -#ifndef MDB_MAXKEYSIZE -#define MDB_MAXKEYSIZE ((MDB_DEVEL) ? 0 : 511) -#endif - - /** The maximum size of a key we can write to the environment. */ -#if MDB_MAXKEYSIZE -#define ENV_MAXKEY(env) (MDB_MAXKEYSIZE) -#else -#define ENV_MAXKEY(env) ((env)->me_maxkey) -#endif - - /** @brief The maximum size of a data item. - * - * We only store a 32 bit value for node sizes. - */ -#define MAXDATASIZE 0xffffffffUL - -#if MDB_DEBUG - /** Key size which fits in a #DKBUF. - * @ingroup debug - */ -#define DKBUF_MAXKEYSIZE ((MDB_MAXKEYSIZE) > 0 ? (MDB_MAXKEYSIZE) : 511) - /** A key buffer. - * @ingroup debug - * This is used for printing a hex dump of a key's contents. - */ -#define DKBUF char kbuf[DKBUF_MAXKEYSIZE*2+1] - /** Display a key in hex. - * @ingroup debug - * Invoke a function to display a key in hex. - */ -#define DKEY(x) mdb_dkey(x, kbuf) -#else -#define DKBUF -#define DKEY(x) 0 -#endif - - /** An invalid page number. - * Mainly used to denote an empty tree. - */ -#define P_INVALID (~(pgno_t)0) - - /** Test if the flags \b f are set in a flag word \b w. */ -#define F_ISSET(w, f) (((w) & (f)) == (f)) - - /** Round \b n up to an even number. */ -#define EVEN(n) (((n) + 1U) & -2) /* sign-extending -2 to match n+1U */ - - /** Used for offsets within a single page. - * Since memory pages are typically 4 or 8KB in size, 12-13 bits, - * this is plenty. - */ -typedef uint16_t indx_t; - - /** Default size of memory map. - * This is certainly too small for any actual applications. Apps should always set - * the size explicitly using #mdb_env_set_mapsize(). - */ -#define DEFAULT_MAPSIZE 1048576 - -/** @defgroup readers Reader Lock Table - * Readers don't acquire any locks for their data access. Instead, they - * simply record their transaction ID in the reader table. The reader - * mutex is needed just to find an empty slot in the reader table. The - * slot's address is saved in thread-specific data so that subsequent read - * transactions started by the same thread need no further locking to proceed. - * - * If #MDB_NOTLS is set, the slot address is not saved in thread-specific data. - * - * No reader table is used if the database is on a read-only filesystem, or - * if #MDB_NOLOCK is set. - * - * Since the database uses multi-version concurrency control, readers don't - * actually need any locking. This table is used to keep track of which - * readers are using data from which old transactions, so that we'll know - * when a particular old transaction is no longer in use. Old transactions - * that have discarded any data pages can then have those pages reclaimed - * for use by a later write transaction. - * - * The lock table is constructed such that reader slots are aligned with the - * processor's cache line size. Any slot is only ever used by one thread. - * This alignment guarantees that there will be no contention or cache - * thrashing as threads update their own slot info, and also eliminates - * any need for locking when accessing a slot. - * - * A writer thread will scan every slot in the table to determine the oldest - * outstanding reader transaction. 
Any freed pages older than this will be - * reclaimed by the writer. The writer doesn't use any locks when scanning - * this table. This means that there's no guarantee that the writer will - * see the most up-to-date reader info, but that's not required for correct - * operation - all we need is to know the upper bound on the oldest reader, - * we don't care at all about the newest reader. So the only consequence of - * reading stale information here is that old pages might hang around a - * while longer before being reclaimed. That's actually good anyway, because - * the longer we delay reclaiming old pages, the more likely it is that a - * string of contiguous pages can be found after coalescing old pages from - * many old transactions together. - * @{ - */ - /** Number of slots in the reader table. - * This value was chosen somewhat arbitrarily. 126 readers plus a - * couple mutexes fit exactly into 8KB on my development machine. - * Applications should set the table size using #mdb_env_set_maxreaders(). - */ -#define DEFAULT_READERS 126 - - /** The size of a CPU cache line in bytes. We want our lock structures - * aligned to this size to avoid false cache line sharing in the - * lock table. - * This value works for most CPUs. For Itanium this should be 128. - */ -#ifndef CACHELINE -#define CACHELINE 64 -#endif - - /** The information we store in a single slot of the reader table. - * In addition to a transaction ID, we also record the process and - * thread ID that owns a slot, so that we can detect stale information, - * e.g. threads or processes that went away without cleaning up. - * @note We currently don't check for stale records. We simply re-init - * the table when we know that we're the only process opening the - * lock file. - */ -typedef struct MDB_rxbody { - /** Current Transaction ID when this transaction began, or (txnid_t)-1. - * Multiple readers that start at the same time will probably have the - * same ID here. Again, it's not important to exclude them from - * anything; all we need to know is which version of the DB they - * started from so we can avoid overwriting any data used in that - * particular version. - */ - txnid_t mrb_txnid; - /** The process ID of the process owning this reader txn. */ - MDB_PID_T mrb_pid; - /** The thread ID of the thread owning this txn. */ - MDB_THR_T mrb_tid; -} MDB_rxbody; - - /** The actual reader record, with cacheline padding. */ -typedef struct MDB_reader { - union { - MDB_rxbody mrx; - /** shorthand for mrb_txnid */ -#define mr_txnid mru.mrx.mrb_txnid -#define mr_pid mru.mrx.mrb_pid -#define mr_tid mru.mrx.mrb_tid - /** cache line alignment */ - char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)]; - } mru; -} MDB_reader; - - /** The header for the reader table. - * The table resides in a memory-mapped file. (This is a different file - * than is used for the main database.) - * - * For POSIX the actual mutexes reside in the shared memory of this - * mapped file. On Windows, mutexes are named objects allocated by the - * kernel; we store the mutex names in this mapped file so that other - * processes can grab them. This same approach is also used on - * MacOSX/Darwin (using named semaphores) since MacOSX doesn't support - * process-shared POSIX mutexes. For these cases where a named object - * is used, the object name is derived from a 64 bit FNV hash of the - * environment pathname. As such, naming collisions are extremely - * unlikely. If a collision occurs, the results are unpredictable. 
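- *
- * As a worked illustration of the padding arithmetic used for these
- * structures (sizes are typical for an LP64 build, not guaranteed): a
- * 24-byte MDB_rxbody (8-byte txnid, 4-byte pid, 8-byte tid, plus
- * padding) rounds up to exactly one 64-byte cache line, since
- * (24+64-1) & ~(64-1) == 64.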
- */ -typedef struct MDB_txbody { - /** Stamp identifying this as an LMDB file. It must be set - * to #MDB_MAGIC. */ - uint32_t mtb_magic; - /** Format of this lock file. Must be set to #MDB_LOCK_FORMAT. */ - uint32_t mtb_format; -#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) - char mtb_rmname[MNAME_LEN]; -#else - /** Mutex protecting access to this table. - * This is the reader lock that #LOCK_MUTEX_R acquires. - */ - pthread_mutex_t mtb_mutex; -#endif - /** The ID of the last transaction committed to the database. - * This is recorded here only for convenience; the value can always - * be determined by reading the main database meta pages. - */ - txnid_t mtb_txnid; - /** The number of slots that have been used in the reader table. - * This always records the maximum count, it is not decremented - * when readers release their slots. - */ - unsigned mtb_numreaders; -} MDB_txbody; - - /** The actual reader table definition. */ -typedef struct MDB_txninfo { - union { - MDB_txbody mtb; -#define mti_magic mt1.mtb.mtb_magic -#define mti_format mt1.mtb.mtb_format -#define mti_mutex mt1.mtb.mtb_mutex -#define mti_rmname mt1.mtb.mtb_rmname -#define mti_txnid mt1.mtb.mtb_txnid -#define mti_numreaders mt1.mtb.mtb_numreaders - char pad[(sizeof(MDB_txbody)+CACHELINE-1) & ~(CACHELINE-1)]; - } mt1; - union { -#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) - char mt2_wmname[MNAME_LEN]; -#define mti_wmname mt2.mt2_wmname -#else - pthread_mutex_t mt2_wmutex; -#define mti_wmutex mt2.mt2_wmutex -#endif - char pad[(MNAME_LEN+CACHELINE-1) & ~(CACHELINE-1)]; - } mt2; - MDB_reader mti_readers[1]; -} MDB_txninfo; - - /** Lockfile format signature: version, features and field layout */ -#define MDB_LOCK_FORMAT \ - ((uint32_t) \ - ((MDB_LOCK_VERSION) \ - /* Flags which describe functionality */ \ - + (((MDB_PIDLOCK) != 0) << 16))) -/** @} */ - -/** Common header for all page types. - * Overflow records occupy a number of contiguous pages with no - * headers on any page after the first. - */ -typedef struct MDB_page { -#define mp_pgno mp_p.p_pgno -#define mp_next mp_p.p_next - union { - pgno_t p_pgno; /**< page number */ - struct MDB_page *p_next; /**< for in-memory list of freed pages */ - } mp_p; - uint16_t mp_pad; -/** @defgroup mdb_page Page Flags - * @ingroup internal - * Flags for the page headers. 
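- *
- * For example, a dirty sub-page of a leaf carries
- * P_LEAF|P_SUBP|P_DIRTY, i.e. 0x02|0x40|0x10 == 0x52.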
- * @{ - */ -#define P_BRANCH 0x01 /**< branch page */ -#define P_LEAF 0x02 /**< leaf page */ -#define P_OVERFLOW 0x04 /**< overflow page */ -#define P_META 0x08 /**< meta page */ -#define P_DIRTY 0x10 /**< dirty page, also set for #P_SUBP pages */ -#define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */ -#define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */ -#define P_LOOSE 0x4000 /**< page was dirtied then freed, can be reused */ -#define P_KEEP 0x8000 /**< leave this page alone during spill */ -/** @} */ - uint16_t mp_flags; /**< @ref mdb_page */ -#define mp_lower mp_pb.pb.pb_lower -#define mp_upper mp_pb.pb.pb_upper -#define mp_pages mp_pb.pb_pages - union { - struct { - indx_t pb_lower; /**< lower bound of free space */ - indx_t pb_upper; /**< upper bound of free space */ - } pb; - uint32_t pb_pages; /**< number of overflow pages */ - } mp_pb; - indx_t mp_ptrs[1]; /**< dynamic size */ -} MDB_page; - - /** Size of the page header, excluding dynamic data at the end */ -#define PAGEHDRSZ ((unsigned) offsetof(MDB_page, mp_ptrs)) - - /** Address of first usable data byte in a page, after the header */ -#define METADATA(p) ((void *)((char *)(p) + PAGEHDRSZ)) - - /** ITS#7713, change PAGEBASE to handle 65536 byte pages */ -#define PAGEBASE ((MDB_DEVEL) ? PAGEHDRSZ : 0) - - /** Number of nodes on a page */ -#define NUMKEYS(p) (((p)->mp_lower - (PAGEHDRSZ-PAGEBASE)) >> 1) - - /** The amount of space remaining in the page */ -#define SIZELEFT(p) (indx_t)((p)->mp_upper - (p)->mp_lower) - - /** The percentage of space used in the page, in tenths of a percent. */ -#define PAGEFILL(env, p) (1000L * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \ - ((env)->me_psize - PAGEHDRSZ)) - /** The minimum page fill factor, in tenths of a percent. - * Pages emptier than this are candidates for merging. - */ -#define FILL_THRESHOLD 250 - - /** Test if a page is a leaf page */ -#define IS_LEAF(p) F_ISSET((p)->mp_flags, P_LEAF) - /** Test if a page is a LEAF2 page */ -#define IS_LEAF2(p) F_ISSET((p)->mp_flags, P_LEAF2) - /** Test if a page is a branch page */ -#define IS_BRANCH(p) F_ISSET((p)->mp_flags, P_BRANCH) - /** Test if a page is an overflow page */ -#define IS_OVERFLOW(p) F_ISSET((p)->mp_flags, P_OVERFLOW) - /** Test if a page is a sub page */ -#define IS_SUBP(p) F_ISSET((p)->mp_flags, P_SUBP) - - /** The number of overflow pages needed to store the given size. */ -#define OVPAGES(size, psize) ((PAGEHDRSZ-1 + (size)) / (psize) + 1) - - /** Link in #MDB_txn.%mt_loose_pgs list */ -#define NEXT_LOOSE_PAGE(p) (*(MDB_page **)((p) + 2)) - - /** Header for a single key/data pair within a page. - * Used in pages of type #P_BRANCH and #P_LEAF without #P_LEAF2. - * We guarantee 2-byte alignment for 'MDB_node's. - */ -typedef struct MDB_node { - /** lo and hi are used for data size on leaf nodes and for - * child pgno on branch nodes. On 64 bit platforms, flags - * is also used for pgno. (Branch nodes have no flags). - * They are in host byte order in case that lets some - * accesses be optimized into a 32-bit word access. - */ -#if BYTE_ORDER == LITTLE_ENDIAN - unsigned short mn_lo, mn_hi; /**< part of data size or pgno */ -#else - unsigned short mn_hi, mn_lo; -#endif -/** @defgroup mdb_node Node Flags - * @ingroup internal - * Flags for node headers. 
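- *
- * For example, a leaf node whose duplicates have been moved into a
- * sub-database carries F_DUPDATA|F_SUBDATA (0x04|0x02 == 0x06), while
- * one still holding an inline sub-page carries F_DUPDATA alone.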
- * @{ - */ -#define F_BIGDATA 0x01 /**< data put on overflow page */ -#define F_SUBDATA 0x02 /**< data is a sub-database */ -#define F_DUPDATA 0x04 /**< data has duplicates */ - -/** valid flags for #mdb_node_add() */ -#define NODE_ADD_FLAGS (F_DUPDATA|F_SUBDATA|MDB_RESERVE|MDB_APPEND) - -/** @} */ - unsigned short mn_flags; /**< @ref mdb_node */ - unsigned short mn_ksize; /**< key size */ - char mn_data[1]; /**< key and data are appended here */ -} MDB_node; - - /** Size of the node header, excluding dynamic data at the end */ -#define NODESIZE offsetof(MDB_node, mn_data) - - /** Bit position of top word in page number, for shifting mn_flags */ -#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0) - - /** Size of a node in a branch page with a given key. - * This is just the node header plus the key, there is no data. - */ -#define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size)) - - /** Size of a node in a leaf page with a given key and data. - * This is node header plus key plus data size. - */ -#define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size) - - /** Address of node \b i in page \b p */ -#define NODEPTR(p, i) ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i] + PAGEBASE)) - - /** Address of the key for the node */ -#define NODEKEY(node) (void *)((node)->mn_data) - - /** Address of the data for a node */ -#define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize) - - /** Get the page number pointed to by a branch node */ -#define NODEPGNO(node) \ - ((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | \ - (PGNO_TOPWORD ? ((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0)) - /** Set the page number in a branch node */ -#define SETPGNO(node,pgno) do { \ - (node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; \ - if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0) - - /** Get the size of the data in a leaf node */ -#define NODEDSZ(node) ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16)) - /** Set the size of the data for a leaf node */ -#define SETDSZ(node,size) do { \ - (node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0) - /** The size of a key in a node */ -#define NODEKSZ(node) ((node)->mn_ksize) - - /** Copy a page number from src to dst */ -#ifdef MISALIGNED_OK -#define COPY_PGNO(dst,src) dst = src -#else -#if SIZE_MAX > 4294967295UL -#define COPY_PGNO(dst,src) do { \ - unsigned short *s, *d; \ - s = (unsigned short *)&(src); \ - d = (unsigned short *)&(dst); \ - *d++ = *s++; \ - *d++ = *s++; \ - *d++ = *s++; \ - *d = *s; \ -} while (0) -#else -#define COPY_PGNO(dst,src) do { \ - unsigned short *s, *d; \ - s = (unsigned short *)&(src); \ - d = (unsigned short *)&(dst); \ - *d++ = *s++; \ - *d = *s; \ -} while (0) -#endif -#endif - /** The address of a key in a LEAF2 page. - * LEAF2 pages are used for #MDB_DUPFIXED sorted-duplicate sub-DBs. - * There are no node headers, keys are stored contiguously. - */ -#define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i)*(ks))) - - /** Set the \b node's key into \b keyptr, if requested. */ -#define MDB_GET_KEY(node, keyptr) { if ((keyptr) != NULL) { \ - (keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } } - - /** Set the \b node's key into \b key. */ -#define MDB_GET_KEY2(node, key) { key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); } - - /** Information about a single database in the environment. 
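- * These are also the counters an application ultimately sees through
- * mdb_stat(); e.g. (sketch, assuming an open txn and dbi):
- *   MDB_stat st;
- *   mdb_stat(txn, dbi, &st);  ... st.ms_depth mirrors md_depth,
- *   st.ms_entries mirrors md_entries, and so on.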
*/ -typedef struct MDB_db { - uint32_t md_pad; /**< also ksize for LEAF2 pages */ - uint16_t md_flags; /**< @ref mdb_dbi_open */ - uint16_t md_depth; /**< depth of this tree */ - pgno_t md_branch_pages; /**< number of internal pages */ - pgno_t md_leaf_pages; /**< number of leaf pages */ - pgno_t md_overflow_pages; /**< number of overflow pages */ - size_t md_entries; /**< number of data items */ - pgno_t md_root; /**< the root page of this tree */ -} MDB_db; - - /** mdb_dbi_open flags */ -#define MDB_VALID 0x8000 /**< DB handle is valid, for me_dbflags */ -#define PERSISTENT_FLAGS (0xffff & ~(MDB_VALID)) -#define VALID_FLAGS (MDB_REVERSEKEY|MDB_DUPSORT|MDB_INTEGERKEY|MDB_DUPFIXED|\ - MDB_INTEGERDUP|MDB_REVERSEDUP|MDB_CREATE) - - /** Handle for the DB used to track free pages. */ -#define FREE_DBI 0 - /** Handle for the default DB. */ -#define MAIN_DBI 1 - - /** Meta page content. - * A meta page is the start point for accessing a database snapshot. - * Pages 0-1 are meta pages. Transaction N writes meta page #(N % 2). - */ -typedef struct MDB_meta { - /** Stamp identifying this as an LMDB file. It must be set - * to #MDB_MAGIC. */ - uint32_t mm_magic; - /** Version number of this lock file. Must be set to #MDB_DATA_VERSION. */ - uint32_t mm_version; - void *mm_address; /**< address for fixed mapping */ - size_t mm_mapsize; /**< size of mmap region */ - MDB_db mm_dbs[2]; /**< first is free space, 2nd is main db */ - /** The size of pages used in this DB */ -#define mm_psize mm_dbs[0].md_pad - /** Any persistent environment flags. @ref mdb_env */ -#define mm_flags mm_dbs[0].md_flags - pgno_t mm_last_pg; /**< last used page in file */ - txnid_t mm_txnid; /**< txnid that committed this page */ -} MDB_meta; - - /** Buffer for a stack-allocated meta page. - * The members define size and alignment, and silence type - * aliasing warnings. They are not used directly; that could - * mean incorrectly using several union members in parallel. - */ -typedef union MDB_metabuf { - MDB_page mb_page; - struct { - char mm_pad[PAGEHDRSZ]; - MDB_meta mm_meta; - } mb_metabuf; -} MDB_metabuf; - - /** Auxiliary DB info. - * The information here is mostly static/read-only. There is - * only a single copy of this record in the environment. - */ -typedef struct MDB_dbx { - MDB_val md_name; /**< name of the database */ - MDB_cmp_func *md_cmp; /**< function for comparing keys */ - MDB_cmp_func *md_dcmp; /**< function for comparing data items */ - MDB_rel_func *md_rel; /**< user relocate function */ - void *md_relctx; /**< user-provided context for md_rel */ -} MDB_dbx; - - /** A database transaction. - * Every operation requires a transaction handle. - */ -struct MDB_txn { - MDB_txn *mt_parent; /**< parent of a nested txn */ - MDB_txn *mt_child; /**< nested txn under this txn */ - pgno_t mt_next_pgno; /**< next unallocated page */ - /** The ID of this transaction. IDs are integers incrementing from 1. - * Only committed write transactions increment the ID. If a transaction - * aborts, the ID may be re-used by the next writer. - */ - txnid_t mt_txnid; - MDB_env *mt_env; /**< the DB environment */ - /** The list of pages that became unused during this transaction. - */ - MDB_IDL mt_free_pgs; - /** The list of loose pages that became unused and may be reused - * in this transaction, linked through #NEXT_LOOSE_PAGE(page). - */ - MDB_page *mt_loose_pgs; - /* #Number of loose pages (#mt_loose_pgs) */ - int mt_loose_count; - /** The sorted list of dirty pages we temporarily wrote to disk - * because the dirty list was full. 
page numbers in here are - * shifted left by 1, deleted slots have the LSB set. - */ - MDB_IDL mt_spill_pgs; - union { - /** For write txns: Modified pages. Sorted when not MDB_WRITEMAP. */ - MDB_ID2L dirty_list; - /** For read txns: This thread/txn's reader table slot, or NULL. */ - MDB_reader *reader; - } mt_u; - /** Array of records for each DB known in the environment. */ - MDB_dbx *mt_dbxs; - /** Array of MDB_db records for each known DB */ - MDB_db *mt_dbs; - /** Array of sequence numbers for each DB handle */ - unsigned int *mt_dbiseqs; -/** @defgroup mt_dbflag Transaction DB Flags - * @ingroup internal - * @{ - */ -#define DB_DIRTY 0x01 /**< DB was modified or is DUPSORT data */ -#define DB_STALE 0x02 /**< Named-DB record is older than txnID */ -#define DB_NEW 0x04 /**< Named-DB handle opened in this txn */ -#define DB_VALID 0x08 /**< DB handle is valid, see also #MDB_VALID */ -/** @} */ - /** In write txns, array of cursors for each DB */ - MDB_cursor **mt_cursors; - /** Array of flags for each DB */ - unsigned char *mt_dbflags; - /** Number of DB records in use. This number only ever increments; - * we don't decrement it when individual DB handles are closed. - */ - MDB_dbi mt_numdbs; - -/** @defgroup mdb_txn Transaction Flags - * @ingroup internal - * @{ - */ -#define MDB_TXN_RDONLY 0x01 /**< read-only transaction */ -#define MDB_TXN_ERROR 0x02 /**< txn is unusable after an error */ -#define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */ -#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */ -/** @} */ - unsigned int mt_flags; /**< @ref mdb_txn */ - /** #dirty_list room: Array size - \#dirty pages visible to this txn. - * Includes ancestor txns' dirty pages not hidden by other txns' - * dirty/spilled pages. Thus commit(nested txn) has room to merge - * dirty_list into mt_parent after freeing hidden mt_parent pages. - */ - unsigned int mt_dirty_room; -}; - -/** Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty. - * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to - * raise this on a 64 bit machine. - */ -#define CURSOR_STACK 32 - -struct MDB_xcursor; - - /** Cursors are used for all DB operations. - * A cursor holds a path of (page pointer, key index) from the DB - * root to a position in the DB, plus other state. #MDB_DUPSORT - * cursors include an xcursor to the current data item. Write txns - * track their cursors and keep them up to date when data moves. - * Exception: An xcursor's pointer to a #P_SUBP page can be stale. - * (A node with #F_DUPDATA but no #F_SUBDATA contains a subpage). - */ -struct MDB_cursor { - /** Next cursor on this DB in this txn */ - MDB_cursor *mc_next; - /** Backup of the original cursor if this cursor is a shadow */ - MDB_cursor *mc_backup; - /** Context used for databases with #MDB_DUPSORT, otherwise NULL */ - struct MDB_xcursor *mc_xcursor; - /** The transaction that owns this cursor */ - MDB_txn *mc_txn; - /** The database handle this cursor operates on */ - MDB_dbi mc_dbi; - /** The database record for this cursor */ - MDB_db *mc_db; - /** The database auxiliary record for this cursor */ - MDB_dbx *mc_dbx; - /** The @ref mt_dbflag for this database */ - unsigned char *mc_dbflag; - unsigned short mc_snum; /**< number of pushed pages */ - unsigned short mc_top; /**< index of top page, normally mc_snum-1 */ -/** @defgroup mdb_cursor Cursor Flags - * @ingroup internal - * Cursor state flags. 
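- *
- * For example, a cursor positioned in a 3-level tree has mc_snum == 3
- * and mc_top == 2, with C_INITIALIZED set; stepping past the last item
- * additionally sets C_EOF.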
- * @{ - */ -#define C_INITIALIZED 0x01 /**< cursor has been initialized and is valid */ -#define C_EOF 0x02 /**< No more data */ -#define C_SUB 0x04 /**< Cursor is a sub-cursor */ -#define C_DEL 0x08 /**< last op was a cursor_del */ -#define C_SPLITTING 0x20 /**< Cursor is in page_split */ -#define C_UNTRACK 0x40 /**< Un-track cursor when closing */ -/** @} */ - unsigned int mc_flags; /**< @ref mdb_cursor */ - MDB_page *mc_pg[CURSOR_STACK]; /**< stack of pushed pages */ - indx_t mc_ki[CURSOR_STACK]; /**< stack of page indices */ -}; - - /** Context for sorted-dup records. - * We could have gone to a fully recursive design, with arbitrarily - * deep nesting of sub-databases. But for now we only handle these - * levels - main DB, optional sub-DB, sorted-duplicate DB. - */ -typedef struct MDB_xcursor { - /** A sub-cursor for traversing the Dup DB */ - MDB_cursor mx_cursor; - /** The database record for this Dup DB */ - MDB_db mx_db; - /** The auxiliary DB record for this Dup DB */ - MDB_dbx mx_dbx; - /** The @ref mt_dbflag for this Dup DB */ - unsigned char mx_dbflag; -} MDB_xcursor; - - /** State of FreeDB old pages, stored in the MDB_env */ -typedef struct MDB_pgstate { - pgno_t *mf_pghead; /**< Reclaimed freeDB pages, or NULL before use */ - txnid_t mf_pglast; /**< ID of last used record, or 0 if !mf_pghead */ -} MDB_pgstate; - - /** The database environment. */ -struct MDB_env { - HANDLE me_fd; /**< The main data file */ - HANDLE me_lfd; /**< The lock file */ - HANDLE me_mfd; /**< just for writing the meta pages */ - /** Failed to update the meta page. Probably an I/O error. */ -#define MDB_FATAL_ERROR 0x80000000U - /** Some fields are initialized. */ -#define MDB_ENV_ACTIVE 0x20000000U - /** me_txkey is set */ -#define MDB_ENV_TXKEY 0x10000000U - uint32_t me_flags; /**< @ref mdb_env */ - unsigned int me_psize; /**< DB page size, inited from me_os_psize */ - unsigned int me_os_psize; /**< OS page size, from #GET_PAGESIZE */ - unsigned int me_maxreaders; /**< size of the reader table */ - unsigned int me_numreaders; /**< max numreaders set by this env */ - MDB_dbi me_numdbs; /**< number of DBs opened */ - MDB_dbi me_maxdbs; /**< size of the DB table */ - MDB_PID_T me_pid; /**< process ID of this env */ - char *me_path; /**< path to the DB files */ - char *me_map; /**< the memory map of the data file */ - MDB_txninfo *me_txns; /**< the memory map of the lock file or NULL */ - MDB_meta *me_metas[2]; /**< pointers to the two meta pages */ - void *me_pbuf; /**< scratch area for DUPSORT put() */ - MDB_txn *me_txn; /**< current write transaction */ - size_t me_mapsize; /**< size of the data memory map */ - off_t me_size; /**< current file size */ - pgno_t me_maxpg; /**< me_mapsize / me_psize */ - MDB_dbx *me_dbxs; /**< array of static DB info */ - uint16_t *me_dbflags; /**< array of flags from MDB_db.md_flags */ - unsigned int *me_dbiseqs; /**< array of dbi sequence numbers */ - pthread_key_t me_txkey; /**< thread-key for readers */ - MDB_pgstate me_pgstate; /**< state of old pages from freeDB */ -# define me_pglast me_pgstate.mf_pglast -# define me_pghead me_pgstate.mf_pghead - MDB_page *me_dpages; /**< list of malloc'd blocks for re-use */ - /** IDL of pages that became unused in a write txn */ - MDB_IDL me_free_pgs; - /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. 
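- * (An ID2L entry pairs a page number with its in-memory MDB_page;
- * mdb_page_dirty() below inserts into it, sorted unless MDB_WRITEMAP.)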
*/ - MDB_ID2L me_dirty_list; - /** Max number of freelist items that can fit in a single overflow page */ - int me_maxfree_1pg; - /** Max size of a node on a page */ - unsigned int me_nodemax; -#if !(MDB_MAXKEYSIZE) - unsigned int me_maxkey; /**< max size of a key */ -#endif - int me_live_reader; /**< have liveness lock in reader table */ -#ifdef _WIN32 - int me_pidquery; /**< Used in OpenProcess */ - HANDLE me_rmutex; /* Windows mutexes don't reside in shared mem */ - HANDLE me_wmutex; -#elif defined(MDB_USE_POSIX_SEM) - sem_t *me_rmutex; /* Shared mutexes are not supported */ - sem_t *me_wmutex; -#endif - void *me_userctx; /**< User-settable context */ - MDB_assert_func *me_assert_func; /**< Callback for assertion failures */ -}; - - /** Nested transaction */ -typedef struct MDB_ntxn { - MDB_txn mnt_txn; /**< the transaction */ - MDB_pgstate mnt_pgstate; /**< parent transaction's saved freestate */ -} MDB_ntxn; - - /** max number of pages to commit in one writev() call */ -#define MDB_COMMIT_PAGES 64 -#if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES -#undef MDB_COMMIT_PAGES -#define MDB_COMMIT_PAGES IOV_MAX -#endif - - /** max bytes to write in one call */ -#define MAX_WRITE (0x80000000U >> (sizeof(ssize_t) == 4)) - - /** Check \b txn and \b dbi arguments to a function */ -#define TXN_DBI_EXIST(txn, dbi) \ - ((txn) && (dbi) < (txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & DB_VALID)) - - /** Check for misused \b dbi handles */ -#define TXN_DBI_CHANGED(txn, dbi) \ - ((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi]) - -static int mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp); -static int mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp); -static int mdb_page_touch(MDB_cursor *mc); - -static int mdb_page_get(MDB_txn *txn, pgno_t pgno, MDB_page **mp, int *lvl); -static int mdb_page_search_root(MDB_cursor *mc, - MDB_val *key, int modify); -#define MDB_PS_MODIFY 1 -#define MDB_PS_ROOTONLY 2 -#define MDB_PS_FIRST 4 -#define MDB_PS_LAST 8 -static int mdb_page_search(MDB_cursor *mc, - MDB_val *key, int flags); -static int mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst); - -#define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */ -static int mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, - pgno_t newpgno, unsigned int nflags); - -static int mdb_env_read_header(MDB_env *env, MDB_meta *meta); -static int mdb_env_pick_meta(const MDB_env *env); -static int mdb_env_write_meta(MDB_txn *txn); -#if !(defined(_WIN32) || defined(MDB_USE_POSIX_SEM)) /* Drop unused excl arg */ -# define mdb_env_close0(env, excl) mdb_env_close1(env) -#endif -static void mdb_env_close0(MDB_env *env, int excl); - -static MDB_node *mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp); -static int mdb_node_add(MDB_cursor *mc, indx_t indx, - MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags); -static void mdb_node_del(MDB_cursor *mc, int ksize); -static void mdb_node_shrink(MDB_page *mp, indx_t indx); -static int mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst); -static int mdb_node_read(MDB_txn *txn, MDB_node *leaf, MDB_val *data); -static size_t mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data); -static size_t mdb_branch_size(MDB_env *env, MDB_val *key); - -static int mdb_rebalance(MDB_cursor *mc); -static int mdb_update_key(MDB_cursor *mc, MDB_val *key); - -static void mdb_cursor_pop(MDB_cursor *mc); -static int mdb_cursor_push(MDB_cursor *mc, MDB_page *mp); - -static int mdb_cursor_del0(MDB_cursor *mc); -static int mdb_del0(MDB_txn 
*txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, unsigned flags); -static int mdb_cursor_sibling(MDB_cursor *mc, int move_right); -static int mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); -static int mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); -static int mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op, - int *exactp); -static int mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data); -static int mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data); - -static void mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx); -static void mdb_xcursor_init0(MDB_cursor *mc); -static void mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node); - -static int mdb_drop0(MDB_cursor *mc, int subs); -static void mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi); - -/** @cond */ -static MDB_cmp_func mdb_cmp_memn, mdb_cmp_memnr, mdb_cmp_int, mdb_cmp_cint, mdb_cmp_long; -/** @endcond */ - -#ifdef _WIN32 -static SECURITY_DESCRIPTOR mdb_null_sd; -static SECURITY_ATTRIBUTES mdb_all_sa; -static int mdb_sec_inited; -#endif - -/** Return the library version info. */ -char * -mdb_version(int *major, int *minor, int *patch) -{ - if (major) *major = MDB_VERSION_MAJOR; - if (minor) *minor = MDB_VERSION_MINOR; - if (patch) *patch = MDB_VERSION_PATCH; - return MDB_VERSION_STRING; -} - -/** Table of descriptions for LMDB @ref errors */ -static char *const mdb_errstr[] = { - "MDB_KEYEXIST: Key/data pair already exists", - "MDB_NOTFOUND: No matching key/data pair found", - "MDB_PAGE_NOTFOUND: Requested page not found", - "MDB_CORRUPTED: Located page was wrong type", - "MDB_PANIC: Update of meta page failed", - "MDB_VERSION_MISMATCH: Database environment version mismatch", - "MDB_INVALID: File is not an LMDB file", - "MDB_MAP_FULL: Environment mapsize limit reached", - "MDB_DBS_FULL: Environment maxdbs limit reached", - "MDB_READERS_FULL: Environment maxreaders limit reached", - "MDB_TLS_FULL: Thread-local storage keys full - too many environments open", - "MDB_TXN_FULL: Transaction has too many dirty pages - transaction too big", - "MDB_CURSOR_FULL: Internal error - cursor stack limit reached", - "MDB_PAGE_FULL: Internal error - page has no more space", - "MDB_MAP_RESIZED: Database contents grew beyond environment mapsize", - "MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed", - "MDB_BAD_RSLOT: Invalid reuse of reader locktable slot", - "MDB_BAD_TXN: Transaction cannot recover - it must be aborted", - "MDB_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong DUPFIXED size", - "MDB_BAD_DBI: The specified DBI handle was closed/changed unexpectedly", -}; - -char * -mdb_strerror(int err) -{ -#ifdef _WIN32 - /** HACK: pad 4KB on stack over the buf. Return system msgs in buf. - * This works as long as no function between the call to mdb_strerror - * and the actual use of the message uses more than 4K of stack. - */ - char pad[4096]; - char buf[1024], *ptr = buf; -#endif - int i; - if (!err) - return ("Successful return: 0"); - - if (err >= MDB_KEYEXIST && err <= MDB_LAST_ERRCODE) { - i = err - MDB_KEYEXIST; - return mdb_errstr[i]; - } - -#ifdef _WIN32 - /* These are the C-runtime error codes we use. The comment indicates - * their numeric value, and the Win32 error they would correspond to - * if the error actually came from a Win32 API. A major mess, we should - * have used LMDB-specific error codes for everything. 
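- * (The switch below just defers these few codes to strerror();
- * anything else is handed to FormatMessage.)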
- */
- switch(err) {
- case ENOENT: /* 2, FILE_NOT_FOUND */
- case EIO: /* 5, ACCESS_DENIED */
- case ENOMEM: /* 12, INVALID_ACCESS */
- case EACCES: /* 13, INVALID_DATA */
- case EBUSY: /* 16, CURRENT_DIRECTORY */
- case EINVAL: /* 22, BAD_COMMAND */
- case ENOSPC: /* 28, OUT_OF_PAPER */
- return strerror(err);
- default:
- ;
- }
- buf[0] = 0;
- FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, err, 0, ptr, sizeof(buf), pad);
- return ptr;
-#else
- return strerror(err);
-#endif
-}
-
-/** assert(3) variant in cursor context */
-#define mdb_cassert(mc, expr) mdb_assert0((mc)->mc_txn->mt_env, expr, #expr)
-/** assert(3) variant in transaction context */
-#define mdb_tassert(mc, expr) mdb_assert0((txn)->mt_env, expr, #expr)
-/** assert(3) variant in environment context */
-#define mdb_eassert(env, expr) mdb_assert0(env, expr, #expr)
-
-#ifndef NDEBUG
-# define mdb_assert0(env, expr, expr_txt) ((expr) ? (void)0 : \
- mdb_assert_fail(env, expr_txt, mdb_func_, __FILE__, __LINE__))
-
-static void
-mdb_assert_fail(MDB_env *env, const char *expr_txt,
- const char *func, const char *file, int line)
-{
- char buf[400];
- sprintf(buf, "%.100s:%d: Assertion '%.200s' failed in %.40s()",
- file, line, expr_txt, func);
- if (env->me_assert_func)
- env->me_assert_func(env, buf);
- fprintf(stderr, "%s\n", buf);
- abort();
-}
-#else
-# define mdb_assert0(env, expr, expr_txt) ((void) 0)
-#endif /* NDEBUG */
-
-#if MDB_DEBUG
-/** Return the page number of \b mp which may be sub-page, for debug output */
-static pgno_t
-mdb_dbg_pgno(MDB_page *mp)
-{
- pgno_t ret;
- COPY_PGNO(ret, mp->mp_pgno);
- return ret;
-}
-
-/** Display a key in hexadecimal and return the address of the result.
- * @param[in] key the key to display
- * @param[in] buf the buffer to write into. Should always be #DKBUF.
- * @return The key in hexadecimal form.
- */
-char *
-mdb_dkey(MDB_val *key, char *buf)
-{
- char *ptr = buf;
- unsigned char *c;
- unsigned int i;
-
- if (!key)
- return "";
- c = key->mv_data;
-
- if (key->mv_size > DKBUF_MAXKEYSIZE)
- return "MDB_MAXKEYSIZE";
- /* may want to make this a dynamic check: if the key is mostly
- * printable characters, print it as-is instead of converting to hex.
- */
-#if 1
- buf[0] = '\0';
- for (i=0; i<key->mv_size; i++)
- ptr += sprintf(ptr, "%02x", *c++);
-#else
- sprintf(buf, "%.*s", key->mv_size, key->mv_data);
-#endif
- return buf;
-}
-
-static const char *
-mdb_leafnode_type(MDB_node *n)
-{
- static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}};
- return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" :
- tp[F_ISSET(n->mn_flags, F_DUPDATA)][F_ISSET(n->mn_flags, F_SUBDATA)];
-}
-
-/** Display all the keys in the page. */
-void
-mdb_page_list(MDB_page *mp)
-{
- pgno_t pgno = mdb_dbg_pgno(mp);
- const char *type, *state = (mp->mp_flags & P_DIRTY) ?
", dirty" : ""; - MDB_node *node; - unsigned int i, nkeys, nsize, total = 0; - MDB_val key; - DKBUF; - - switch (mp->mp_flags & (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP)) { - case P_BRANCH: type = "Branch page"; break; - case P_LEAF: type = "Leaf page"; break; - case P_LEAF|P_SUBP: type = "Sub-page"; break; - case P_LEAF|P_LEAF2: type = "LEAF2 page"; break; - case P_LEAF|P_LEAF2|P_SUBP: type = "LEAF2 sub-page"; break; - case P_OVERFLOW: - fprintf(stderr, "Overflow page %"Z"u pages %u%s\n", - pgno, mp->mp_pages, state); - return; - case P_META: - fprintf(stderr, "Meta-page %"Z"u txnid %"Z"u\n", - pgno, ((MDB_meta *)METADATA(mp))->mm_txnid); - return; - default: - fprintf(stderr, "Bad page %"Z"u flags 0x%u\n", pgno, mp->mp_flags); - return; - } - - nkeys = NUMKEYS(mp); - fprintf(stderr, "%s %"Z"u numkeys %d%s\n", type, pgno, nkeys, state); - - for (i=0; imp_pad; - key.mv_data = LEAF2KEY(mp, i, nsize); - total += nsize; - fprintf(stderr, "key %d: nsize %d, %s\n", i, nsize, DKEY(&key)); - continue; - } - node = NODEPTR(mp, i); - key.mv_size = node->mn_ksize; - key.mv_data = node->mn_data; - nsize = NODESIZE + key.mv_size; - if (IS_BRANCH(mp)) { - fprintf(stderr, "key %d: page %"Z"u, %s\n", i, NODEPGNO(node), - DKEY(&key)); - total += nsize; - } else { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - nsize += sizeof(pgno_t); - else - nsize += NODEDSZ(node); - total += nsize; - nsize += sizeof(indx_t); - fprintf(stderr, "key %d: nsize %d, %s%s\n", - i, nsize, DKEY(&key), mdb_leafnode_type(node)); - } - total = EVEN(total); - } - fprintf(stderr, "Total: header %d + contents %d + unused %d\n", - IS_LEAF2(mp) ? PAGEHDRSZ : PAGEBASE + mp->mp_lower, total, SIZELEFT(mp)); -} - -void -mdb_cursor_chk(MDB_cursor *mc) -{ - unsigned int i; - MDB_node *node; - MDB_page *mp; - - if (!mc->mc_snum && !(mc->mc_flags & C_INITIALIZED)) return; - for (i=0; imc_top; i++) { - mp = mc->mc_pg[i]; - node = NODEPTR(mp, mc->mc_ki[i]); - if (NODEPGNO(node) != mc->mc_pg[i+1]->mp_pgno) - printf("oops!\n"); - } - if (mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i])) - printf("ack!\n"); -} -#endif - -#if (MDB_DEBUG) > 2 -/** Count all the pages in each DB and in the freelist - * and make sure it matches the actual number of pages - * being used. - * All named DBs must be open for a correct count. 
- */
-static void mdb_audit(MDB_txn *txn)
-{
- MDB_cursor mc;
- MDB_val key, data;
- MDB_ID freecount, count;
- MDB_dbi i;
- int rc;
-
- freecount = 0;
- mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
- while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0)
- freecount += *(MDB_ID *)data.mv_data;
- mdb_tassert(txn, rc == MDB_NOTFOUND);
-
- count = 0;
- for (i = 0; i<txn->mt_numdbs; i++) {
- MDB_xcursor mx;
- if (!(txn->mt_dbflags[i] & DB_VALID))
- continue;
- mdb_cursor_init(&mc, txn, i, &mx);
- if (txn->mt_dbs[i].md_root == P_INVALID)
- continue;
- count += txn->mt_dbs[i].md_branch_pages +
- txn->mt_dbs[i].md_leaf_pages +
- txn->mt_dbs[i].md_overflow_pages;
- if (txn->mt_dbs[i].md_flags & MDB_DUPSORT) {
- rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST);
- for (; rc == MDB_SUCCESS; rc = mdb_cursor_sibling(&mc, 1)) {
- unsigned j;
- MDB_page *mp;
- mp = mc.mc_pg[mc.mc_top];
- for (j=0; j<NUMKEYS(mp); j++) {
- MDB_node *leaf = NODEPTR(mp, j);
- if (leaf->mn_flags & F_SUBDATA) {
- MDB_db db;
- memcpy(&db, NODEDATA(leaf), sizeof(db));
- count += db.md_branch_pages + db.md_leaf_pages +
- db.md_overflow_pages;
- }
- }
- }
- mdb_tassert(txn, rc == MDB_NOTFOUND);
- }
- }
- if (freecount + count + 2 /* metapages */ != txn->mt_next_pgno) {
- fprintf(stderr, "audit: %lu freecount: %lu count: %lu total: %lu next_pgno: %lu\n",
- txn->mt_txnid, freecount, count+2, freecount+count+2, txn->mt_next_pgno);
- }
-}
-#endif
-
-int
-mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
-{
- return txn->mt_dbxs[dbi].md_cmp(a, b);
-}
-
-int
-mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
-{
- return txn->mt_dbxs[dbi].md_dcmp(a, b);
-}
-
-/** Allocate memory for a page.
- * Re-use old malloc'd pages first for singletons, otherwise just malloc.
- */
-static MDB_page *
-mdb_page_malloc(MDB_txn *txn, unsigned num)
-{
- MDB_env *env = txn->mt_env;
- MDB_page *ret = env->me_dpages;
- size_t psize = env->me_psize, sz = psize, off;
- /* For ! #MDB_NOMEMINIT, psize counts how much to init.
- * For a single page alloc, we init everything after the page header.
- * For multi-page, we init the final page; if the caller needed that
- * many pages they will be filling in at least up to the last page.
- */
- if (num == 1) {
- if (ret) {
- VGMEMP_ALLOC(env, ret, sz);
- VGMEMP_DEFINED(ret, sizeof(ret->mp_next));
- env->me_dpages = ret->mp_next;
- return ret;
- }
- psize -= off = PAGEHDRSZ;
- } else {
- sz *= num;
- off = sz - psize;
- }
- if ((ret = malloc(sz)) != NULL) {
- VGMEMP_ALLOC(env, ret, sz);
- if (!(env->me_flags & MDB_NOMEMINIT)) {
- memset((char *)ret + off, 0, psize);
- ret->mp_pad = 0;
- }
- } else {
- txn->mt_flags |= MDB_TXN_ERROR;
- }
- return ret;
-}
-/** Free a single page.
- * Saves single pages to a list, for future reuse.
- * (This is not used for multi-page overflow pages.)
- */
-static void
-mdb_page_free(MDB_env *env, MDB_page *mp)
-{
- mp->mp_next = env->me_dpages;
- VGMEMP_FREE(env, mp);
- env->me_dpages = mp;
-}
-
-/** Free a dirty page */
-static void
-mdb_dpage_free(MDB_env *env, MDB_page *dp)
-{
- if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) {
- mdb_page_free(env, dp);
- } else {
- /* large pages just get freed directly */
- VGMEMP_FREE(env, dp);
- free(dp);
- }
-}
-
-/** Return all dirty pages to dpage list */
-static void
-mdb_dlist_free(MDB_txn *txn)
-{
- MDB_env *env = txn->mt_env;
- MDB_ID2L dl = txn->mt_u.dirty_list;
- unsigned i, n = dl[0].mid;
-
- for (i = 1; i <= n; i++) {
- mdb_dpage_free(env, dl[i].mptr);
- }
- dl[0].mid = 0;
-}
-
-/** Loosen or free a single page.
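- * (Loose pages keep their page number and are handed straight back
- * by mdb_page_alloc(); fully freed pages go to mt_free_pgs instead.)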
- * Saves single pages to a list for future reuse
- * in this same txn. It has been pulled from the freeDB
- * and already resides on the dirty list, but has been
- * deleted. Use these pages first before pulling again
- * from the freeDB.
- *
- * If the page wasn't dirtied in this txn, just add it
- * to this txn's free list.
- */
-static int
-mdb_page_loose(MDB_cursor *mc, MDB_page *mp)
-{
- int loose = 0;
- pgno_t pgno = mp->mp_pgno;
- MDB_txn *txn = mc->mc_txn;
-
- if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) {
- if (txn->mt_parent) {
- MDB_ID2 *dl = txn->mt_u.dirty_list;
- /* If txn has a parent, make sure the page is in our
- * dirty list.
- */
- if (dl[0].mid) {
- unsigned x = mdb_mid2l_search(dl, pgno);
- if (x <= dl[0].mid && dl[x].mid == pgno) {
- if (mp != dl[x].mptr) { /* bad cursor? */
- mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
- txn->mt_flags |= MDB_TXN_ERROR;
- return MDB_CORRUPTED;
- }
- /* ok, it's ours */
- loose = 1;
- }
- }
- } else {
- /* no parent txn, so it's just ours */
- loose = 1;
- }
- }
- if (loose) {
- DPRINTF(("loosen db %d page %"Z"u", DDBI(mc),
- mp->mp_pgno));
- NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs;
- txn->mt_loose_pgs = mp;
- txn->mt_loose_count++;
- mp->mp_flags |= P_LOOSE;
- } else {
- int rc = mdb_midl_append(&txn->mt_free_pgs, pgno);
- if (rc)
- return rc;
- }
-
- return MDB_SUCCESS;
-}
-
-/** Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn.
- * @param[in] mc A cursor handle for the current operation.
- * @param[in] pflags Flags of the pages to update:
- * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it.
- * @param[in] all No shortcuts. Needed except after a full #mdb_page_flush().
- * @return 0 on success, non-zero on failure.
- */
-static int
-mdb_pages_xkeep(MDB_cursor *mc, unsigned pflags, int all)
-{
- enum { Mask = P_SUBP|P_DIRTY|P_LOOSE|P_KEEP };
- MDB_txn *txn = mc->mc_txn;
- MDB_cursor *m3;
- MDB_xcursor *mx;
- MDB_page *dp, *mp;
- MDB_node *leaf;
- unsigned i, j;
- int rc = MDB_SUCCESS, level;
-
- /* Mark pages seen by cursors */
- if (mc->mc_flags & C_UNTRACK)
- mc = NULL; /* will find mc in mt_cursors */
- for (i = txn->mt_numdbs;; mc = txn->mt_cursors[--i]) {
- for (; mc; mc=mc->mc_next) {
- if (!(mc->mc_flags & C_INITIALIZED))
- continue;
- for (m3 = mc;; m3 = &mx->mx_cursor) {
- mp = NULL;
- for (j=0; j<m3->mc_snum; j++) {
- mp = m3->mc_pg[j];
- if ((mp->mp_flags & Mask) == pflags)
- mp->mp_flags ^= P_KEEP;
- }
- mx = m3->mc_xcursor;
- /* Proceed to mx if it is at a sub-database */
- if (! (mx && (mx->mx_cursor.mc_flags & C_INITIALIZED)))
- break;
- if (! (mp && (mp->mp_flags & P_LEAF)))
- break;
- leaf = NODEPTR(mp, m3->mc_ki[j-1]);
- if (!(leaf->mn_flags & F_SUBDATA))
- break;
- }
- }
- if (i == 0)
- break;
- }
-
- if (all) {
- /* Mark dirty root pages */
- for (i=0; i<txn->mt_numdbs; i++) {
- if (txn->mt_dbflags[i] & DB_DIRTY) {
- pgno_t pgno = txn->mt_dbs[i].md_root;
- if (pgno == P_INVALID)
- continue;
- if ((rc = mdb_page_get(txn, pgno, &dp, &level)) != MDB_SUCCESS)
- break;
- if ((dp->mp_flags & Mask) == pflags && level <= 1)
- dp->mp_flags ^= P_KEEP;
- }
- }
- }
-
- return rc;
-}
-
-static int mdb_page_flush(MDB_txn *txn, int keep);
-
-/** Spill pages from the dirty list back to disk.
- * This is intended to prevent running into #MDB_TXN_FULL situations,
- * but note that they may still occur in a few cases:
- * 1) our estimate of the txn size could be too small. Currently this
- * seems unlikely, except with a large number of #MDB_MULTIPLE items.
- * 2) child txns may run out of space if their parents dirtied a - * lot of pages and never spilled them. TODO: we probably should do - * a preemptive spill during #mdb_txn_begin() of a child txn, if - * the parent's dirty_room is below a given threshold. - * - * Otherwise, if not using nested txns, it is expected that apps will - * not run into #MDB_TXN_FULL any more. The pages are flushed to disk - * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared. - * If the txn never references them again, they can be left alone. - * If the txn only reads them, they can be used without any fuss. - * If the txn writes them again, they can be dirtied immediately without - * going thru all of the work of #mdb_page_touch(). Such references are - * handled by #mdb_page_unspill(). - * - * Also note, we never spill DB root pages, nor pages of active cursors, - * because we'll need these back again soon anyway. And in nested txns, - * we can't spill a page in a child txn if it was already spilled in a - * parent txn. That would alter the parent txns' data even though - * the child hasn't committed yet, and we'd have no way to undo it if - * the child aborted. - * - * @param[in] m0 cursor A cursor handle identifying the transaction and - * database for which we are checking space. - * @param[in] key For a put operation, the key being stored. - * @param[in] data For a put operation, the data being stored. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data) -{ - MDB_txn *txn = m0->mc_txn; - MDB_page *dp; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned int i, j, need; - int rc; - - if (m0->mc_flags & C_SUB) - return MDB_SUCCESS; - - /* Estimate how much space this op will take */ - i = m0->mc_db->md_depth; - /* Named DBs also dirty the main DB */ - if (m0->mc_dbi > MAIN_DBI) - i += txn->mt_dbs[MAIN_DBI].md_depth; - /* For puts, roughly factor in the key+data size */ - if (key) - i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize; - i += i; /* double it for good measure */ - need = i; - - if (txn->mt_dirty_room > i) - return MDB_SUCCESS; - - if (!txn->mt_spill_pgs) { - txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX); - if (!txn->mt_spill_pgs) - return ENOMEM; - } else { - /* purge deleted slots */ - MDB_IDL sl = txn->mt_spill_pgs; - unsigned int num = sl[0]; - j=0; - for (i=1; i<=num; i++) { - if (!(sl[i] & 1)) - sl[++j] = sl[i]; - } - sl[0] = j; - } - - /* Preserve pages which may soon be dirtied again */ - if ((rc = mdb_pages_xkeep(m0, P_DIRTY, 1)) != MDB_SUCCESS) - goto done; - - /* Less aggressive spill - we originally spilled the entire dirty list, - * with a few exceptions for cursor pages and DB root pages. But this - * turns out to be a lot of wasted effort because in a large txn many - * of those pages will need to be used again. So now we spill only 1/8th - * of the dirty pages. Testing revealed this to be a good tradeoff, - * better than 1/2, 1/4, or 1/10. - */ - if (need < MDB_IDL_UM_MAX / 8) - need = MDB_IDL_UM_MAX / 8; - - /* Save the page IDs of all the pages we're flushing */ - /* flush from the tail forward, this saves a lot of shifting later on. */ - for (i=dl[0].mid; i && need; i--) { - MDB_ID pn = dl[i].mid << 1; - dp = dl[i].mptr; - if (dp->mp_flags & (P_LOOSE|P_KEEP)) - continue; - /* Can't spill twice, make sure it's not already in a parent's - * spill list. 
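- * (pn is pgno << 1, keeping the LSB free: spill-list entries with
- * the LSB set are the deleted slots purged above.)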
- */
- if (txn->mt_parent) {
- MDB_txn *tx2;
- for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) {
- if (tx2->mt_spill_pgs) {
- j = mdb_midl_search(tx2->mt_spill_pgs, pn);
- if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == pn) {
- dp->mp_flags |= P_KEEP;
- break;
- }
- }
- }
- if (tx2)
- continue;
- }
- if ((rc = mdb_midl_append(&txn->mt_spill_pgs, pn)))
- goto done;
- need--;
- }
- mdb_midl_sort(txn->mt_spill_pgs);
-
- /* Flush the spilled part of dirty list */
- if ((rc = mdb_page_flush(txn, i)) != MDB_SUCCESS)
- goto done;
-
- /* Reset any dirty pages we kept that page_flush didn't see */
- rc = mdb_pages_xkeep(m0, P_DIRTY|P_KEEP, i);
-
-done:
- txn->mt_flags |= rc ? MDB_TXN_ERROR : MDB_TXN_SPILLS;
- return rc;
-}
-
-/** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. */
-static txnid_t
-mdb_find_oldest(MDB_txn *txn)
-{
- int i;
- txnid_t mr, oldest = txn->mt_txnid - 1;
- if (txn->mt_env->me_txns) {
- MDB_reader *r = txn->mt_env->me_txns->mti_readers;
- for (i = txn->mt_env->me_txns->mti_numreaders; --i >= 0; ) {
- if (r[i].mr_pid) {
- mr = r[i].mr_txnid;
- if (oldest > mr)
- oldest = mr;
- }
- }
- }
- return oldest;
-}
-
-/** Add a page to the txn's dirty list */
-static void
-mdb_page_dirty(MDB_txn *txn, MDB_page *mp)
-{
- MDB_ID2 mid;
- int rc, (*insert)(MDB_ID2L, MDB_ID2 *);
-
- if (txn->mt_env->me_flags & MDB_WRITEMAP) {
- insert = mdb_mid2l_append;
- } else {
- insert = mdb_mid2l_insert;
- }
- mid.mid = mp->mp_pgno;
- mid.mptr = mp;
- rc = insert(txn->mt_u.dirty_list, &mid);
- mdb_tassert(txn, rc == 0);
- txn->mt_dirty_room--;
-}
-
-/** Allocate page numbers and memory for writing. Maintain me_pglast,
- * me_pghead and mt_next_pgno.
- *
- * If there are free pages available from older transactions, they
- * are re-used first. Otherwise allocate a new page at mt_next_pgno.
- * Do not modify the freeDB, just merge freeDB records into me_pghead[]
- * and move me_pglast to say which records were consumed. Only this
- * function can create me_pghead and move me_pglast/mt_next_pgno.
- * @param[in] mc cursor A cursor handle identifying the transaction and
- * database for which we are allocating.
- * @param[in] num the number of pages to allocate.
- * @param[out] mp Address of the allocated page(s). Requests for multiple pages
- * will always be satisfied by a single contiguous chunk of memory.
- * @return 0 on success, non-zero on failure.
- */
-static int
-mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp)
-{
-#ifdef MDB_PARANOID /* Seems like we can ignore this now */
- /* Get at most <Max_retries> more freeDB records once me_pghead
- * has enough pages. If not enough, use new pages from the map.
- * If <Paranoid> and mc is updating the freeDB, only get new
- * records if me_pghead is empty. Then the freelist cannot play
- * catch-up with itself by growing while trying to save it.
- */
- enum { Paranoid = 1, Max_retries = 500 };
-#else
- enum { Paranoid = 0, Max_retries = INT_MAX /*infinite*/ };
-#endif
- int rc, retry = num * 20;
- MDB_txn *txn = mc->mc_txn;
- MDB_env *env = txn->mt_env;
- pgno_t pgno, *mop = env->me_pghead;
- unsigned i, j, mop_len = mop ?
mop[0] : 0, n2 = num-1; - MDB_page *np; - txnid_t oldest = 0, last; - MDB_cursor_op op; - MDB_cursor m2; - - /* If there are any loose pages, just use them */ - if (num == 1 && txn->mt_loose_pgs) { - np = txn->mt_loose_pgs; - txn->mt_loose_pgs = NEXT_LOOSE_PAGE(np); - txn->mt_loose_count--; - DPRINTF(("db %d use loose page %"Z"u", DDBI(mc), - np->mp_pgno)); - *mp = np; - return MDB_SUCCESS; - } - - *mp = NULL; - - /* If our dirty list is already full, we can't do anything */ - if (txn->mt_dirty_room == 0) { - rc = MDB_TXN_FULL; - goto fail; - } - - for (op = MDB_FIRST;; op = MDB_NEXT) { - MDB_val key, data; - MDB_node *leaf; - pgno_t *idl; - - /* Seek a big enough contiguous page range. Prefer - * pages at the tail, just truncating the list. - */ - if (mop_len > n2) { - i = mop_len; - do { - pgno = mop[i]; - if (mop[i-n2] == pgno+n2) - goto search_done; - } while (--i > n2); - if (--retry < 0) - break; - } - - if (op == MDB_FIRST) { /* 1st iteration */ - /* Prepare to fetch more and coalesce */ - oldest = mdb_find_oldest(txn); - last = env->me_pglast; - mdb_cursor_init(&m2, txn, FREE_DBI, NULL); - if (last) { - op = MDB_SET_RANGE; - key.mv_data = &last; /* will look up last+1 */ - key.mv_size = sizeof(last); - } - if (Paranoid && mc->mc_dbi == FREE_DBI) - retry = -1; - } - if (Paranoid && retry < 0 && mop_len) - break; - - last++; - /* Do not fetch more if the record will be too recent */ - if (oldest <= last) - break; - rc = mdb_cursor_get(&m2, &key, NULL, op); - if (rc) { - if (rc == MDB_NOTFOUND) - break; - goto fail; - } - last = *(txnid_t*)key.mv_data; - if (oldest <= last) - break; - np = m2.mc_pg[m2.mc_top]; - leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]); - if ((rc = mdb_node_read(txn, leaf, &data)) != MDB_SUCCESS) - return rc; - - idl = (MDB_ID *) data.mv_data; - i = idl[0]; - if (!mop) { - if (!(env->me_pghead = mop = mdb_midl_alloc(i))) { - rc = ENOMEM; - goto fail; - } - } else { - if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0) - goto fail; - mop = env->me_pghead; - } - env->me_pglast = last; -#if (MDB_DEBUG) > 1 - DPRINTF(("IDL read txn %"Z"u root %"Z"u num %u", - last, txn->mt_dbs[FREE_DBI].md_root, i)); - for (j = i; j; j--) - DPRINTF(("IDL %"Z"u", idl[j])); -#endif - /* Merge in descending sorted order */ - mdb_midl_xmerge(mop, idl); - mop_len = mop[0]; - } - - /* Use new pages from the map when nothing suitable in the freeDB */ - i = 0; - pgno = txn->mt_next_pgno; - if (pgno + num >= env->me_maxpg) { - DPUTS("DB size maxed out"); - rc = MDB_MAP_FULL; - goto fail; - } - -search_done: - if (env->me_flags & MDB_WRITEMAP) { - np = (MDB_page *)(env->me_map + env->me_psize * pgno); - } else { - if (!(np = mdb_page_malloc(txn, num))) { - rc = ENOMEM; - goto fail; - } - } - if (i) { - mop[0] = mop_len -= num; - /* Move any stragglers down */ - for (j = i-num; j < mop_len; ) - mop[++j] = mop[++i]; - } else { - txn->mt_next_pgno = pgno + num; - } - np->mp_pgno = pgno; - mdb_page_dirty(txn, np); - *mp = np; - - return MDB_SUCCESS; - -fail: - txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -/** Copy the used portions of a non-overflow page. - * @param[in] dst page to copy into - * @param[in] src page to copy from - * @param[in] psize size of a page - */ -static void -mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize) -{ - enum { Align = sizeof(pgno_t) }; - indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper-lower; - - /* If page isn't full, just copy the used portion. Adjust - * alignment so memcpy may copy words instead of bytes. 
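- * Worked example (illustrative; PAGEBASE 0, Align 8): with mp_lower 70
- * and mp_upper 4000, the header copy covers bytes [0, 72) and the node
- * copy covers [4000, psize), skipping the unused gap in between.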
- */ - if ((unused &= -Align) && !IS_LEAF2(src)) { - upper = (upper + PAGEBASE) & -Align; - memcpy(dst, src, (lower + PAGEBASE + (Align-1)) & -Align); - memcpy((pgno_t *)((char *)dst+upper), (pgno_t *)((char *)src+upper), - psize - upper); - } else { - memcpy(dst, src, psize - unused); - } -} - -/** Pull a page off the txn's spill list, if present. - * If a page being referenced was spilled to disk in this txn, bring - * it back and make it dirty/writable again. - * @param[in] txn the transaction handle. - * @param[in] mp the page being referenced. It must not be dirty. - * @param[out] ret the writable page, if any. ret is unchanged if - * mp wasn't spilled. - */ -static int -mdb_page_unspill(MDB_txn *txn, MDB_page *mp, MDB_page **ret) -{ - MDB_env *env = txn->mt_env; - const MDB_txn *tx2; - unsigned x; - pgno_t pgno = mp->mp_pgno, pn = pgno << 1; - - for (tx2 = txn; tx2; tx2=tx2->mt_parent) { - if (!tx2->mt_spill_pgs) - continue; - x = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { - MDB_page *np; - int num; - if (txn->mt_dirty_room == 0) - return MDB_TXN_FULL; - if (IS_OVERFLOW(mp)) - num = mp->mp_pages; - else - num = 1; - if (env->me_flags & MDB_WRITEMAP) { - np = mp; - } else { - np = mdb_page_malloc(txn, num); - if (!np) - return ENOMEM; - if (num > 1) - memcpy(np, mp, num * env->me_psize); - else - mdb_page_copy(np, mp, env->me_psize); - } - if (tx2 == txn) { - /* If in current txn, this page is no longer spilled. - * If it happens to be the last page, truncate the spill list. - * Otherwise mark it as deleted by setting the LSB. - */ - if (x == txn->mt_spill_pgs[0]) - txn->mt_spill_pgs[0]--; - else - txn->mt_spill_pgs[x] |= 1; - } /* otherwise, if belonging to a parent txn, the - * page remains spilled until child commits - */ - - mdb_page_dirty(txn, np); - np->mp_flags |= P_DIRTY; - *ret = np; - break; - } - } - return MDB_SUCCESS; -} - -/** Touch a page: make it dirty and re-insert into tree with updated pgno. - * @param[in] mc cursor pointing to the page to be touched - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_touch(MDB_cursor *mc) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top], *np; - MDB_txn *txn = mc->mc_txn; - MDB_cursor *m2, *m3; - pgno_t pgno; - int rc; - - if (!F_ISSET(mp->mp_flags, P_DIRTY)) { - if (txn->mt_flags & MDB_TXN_SPILLS) { - np = NULL; - rc = mdb_page_unspill(txn, mp, &np); - if (rc) - goto fail; - if (np) - goto done; - } - if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) || - (rc = mdb_page_alloc(mc, 1, &np))) - goto fail; - pgno = np->mp_pgno; - DPRINTF(("touched db %d page %"Z"u -> %"Z"u", DDBI(mc), - mp->mp_pgno, pgno)); - mdb_cassert(mc, mp->mp_pgno != pgno); - mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); - /* Update the parent page, if any, to point to the new page */ - if (mc->mc_top) { - MDB_page *parent = mc->mc_pg[mc->mc_top-1]; - MDB_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top-1]); - SETPGNO(node, pgno); - } else { - mc->mc_db->md_root = pgno; - } - } else if (txn->mt_parent && !IS_SUBP(mp)) { - MDB_ID2 mid, *dl = txn->mt_u.dirty_list; - pgno = mp->mp_pgno; - /* If txn has a parent, make sure the page is in our - * dirty list. - */ - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - if (mp != dl[x].mptr) { /* bad cursor? 
*/ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - return 0; - } - } - mdb_cassert(mc, dl[0].mid < MDB_IDL_UM_MAX); - /* No - copy it */ - np = mdb_page_malloc(txn, 1); - if (!np) - return ENOMEM; - mid.mid = pgno; - mid.mptr = np; - rc = mdb_mid2l_insert(dl, &mid); - mdb_cassert(mc, rc == 0); - } else { - return 0; - } - - mdb_page_copy(np, mp, txn->mt_env->me_psize); - np->mp_pgno = pgno; - np->mp_flags |= P_DIRTY; - -done: - /* Adjust cursors pointing to mp */ - mc->mc_pg[mc->mc_top] = np; - m2 = txn->mt_cursors[mc->mc_dbi]; - if (mc->mc_flags & C_SUB) { - for (; m2; m2=m2->mc_next) { - m3 = &m2->mc_xcursor->mx_cursor; - if (m3->mc_snum < mc->mc_snum) continue; - if (m3->mc_pg[mc->mc_top] == mp) - m3->mc_pg[mc->mc_top] = np; - } - } else { - for (; m2; m2=m2->mc_next) { - if (m2->mc_snum < mc->mc_snum) continue; - if (m2->mc_pg[mc->mc_top] == mp) { - m2->mc_pg[mc->mc_top] = np; - if ((mc->mc_db->md_flags & MDB_DUPSORT) && - IS_LEAF(np) && - m2->mc_ki[mc->mc_top] == mc->mc_ki[mc->mc_top]) - { - MDB_node *leaf = NODEPTR(np, mc->mc_ki[mc->mc_top]); - if (!(leaf->mn_flags & F_SUBDATA)) - m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } - } - } - } - return 0; - -fail: - txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_env_sync(MDB_env *env, int force) -{ - int rc = 0; - if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) { - if (env->me_flags & MDB_WRITEMAP) { - int flags = ((env->me_flags & MDB_MAPASYNC) && !force) - ? MS_ASYNC : MS_SYNC; - if (MDB_MSYNC(env->me_map, env->me_mapsize, flags)) - rc = ErrCode(); -#ifdef _WIN32 - else if (flags == MS_SYNC && MDB_FDATASYNC(env->me_fd)) - rc = ErrCode(); -#endif - } else { - if (MDB_FDATASYNC(env->me_fd)) - rc = ErrCode(); - } - } - return rc; -} - -/** Back up parent txn's cursors, then grab the originals for tracking */ -static int -mdb_cursor_shadow(MDB_txn *src, MDB_txn *dst) -{ - MDB_cursor *mc, *bk; - MDB_xcursor *mx; - size_t size; - int i; - - for (i = src->mt_numdbs; --i >= 0; ) { - if ((mc = src->mt_cursors[i]) != NULL) { - size = sizeof(MDB_cursor); - if (mc->mc_xcursor) - size += sizeof(MDB_xcursor); - for (; mc; mc = bk->mc_next) { - bk = malloc(size); - if (!bk) - return ENOMEM; - *bk = *mc; - mc->mc_backup = bk; - mc->mc_db = &dst->mt_dbs[i]; - /* Kill pointers into src - and dst to reduce abuse: The - * user may not use mc until dst ends. Otherwise we'd... - */ - mc->mc_txn = NULL; /* ...set this to dst */ - mc->mc_dbflag = NULL; /* ...and &dst->mt_dbflags[i] */ - if ((mx = mc->mc_xcursor) != NULL) { - *(MDB_xcursor *)(bk+1) = *mx; - mx->mx_cursor.mc_txn = NULL; /* ...and dst. */ - } - mc->mc_next = dst->mt_cursors[i]; - dst->mt_cursors[i] = mc; - } - } - } - return MDB_SUCCESS; -} - -/** Close this write txn's cursors, give parent txn's cursors back to parent. - * @param[in] txn the transaction handle. - * @param[in] merge true to keep changes to parent cursors, false to revert. - * @return 0 on success, non-zero on failure. 
- */ -static void -mdb_cursors_close(MDB_txn *txn, unsigned merge) -{ - MDB_cursor **cursors = txn->mt_cursors, *mc, *next, *bk; - MDB_xcursor *mx; - int i; - - for (i = txn->mt_numdbs; --i >= 0; ) { - for (mc = cursors[i]; mc; mc = next) { - next = mc->mc_next; - if ((bk = mc->mc_backup) != NULL) { - if (merge) { - /* Commit changes to parent txn */ - mc->mc_next = bk->mc_next; - mc->mc_backup = bk->mc_backup; - mc->mc_txn = bk->mc_txn; - mc->mc_db = bk->mc_db; - mc->mc_dbflag = bk->mc_dbflag; - if ((mx = mc->mc_xcursor) != NULL) - mx->mx_cursor.mc_txn = bk->mc_txn; - } else { - /* Abort nested txn */ - *mc = *bk; - if ((mx = mc->mc_xcursor) != NULL) - *mx = *(MDB_xcursor *)(bk+1); - } - mc = bk; - } - /* Only malloced cursors are permanently tracked. */ - free(mc); - } - cursors[i] = NULL; - } -} - -#if !(MDB_DEBUG) -#define mdb_txn_reset0(txn, act) mdb_txn_reset0(txn) -#endif -static void -mdb_txn_reset0(MDB_txn *txn, const char *act); - -#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ -enum Pidlock_op { - Pidset, Pidcheck -}; -#else -enum Pidlock_op { - Pidset = F_SETLK, Pidcheck = F_GETLK -}; -#endif - -/** Set or check a pid lock. Set returns 0 on success. - * Check returns 0 if the process is certainly dead, nonzero if it may - * be alive (the lock exists or an error happened so we do not know). - * - * On Windows Pidset is a no-op, we merely check for the existence - * of the process with the given pid. On POSIX we use a single byte - * lock on the lockfile, set at an offset equal to the pid. - */ -static int -mdb_reader_pid(MDB_env *env, enum Pidlock_op op, MDB_PID_T pid) -{ -#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ - int ret = 0; - HANDLE h; - if (op == Pidcheck) { - h = OpenProcess(env->me_pidquery, FALSE, pid); - /* No documented "no such process" code, but other program use this: */ - if (!h) - return ErrCode() != ERROR_INVALID_PARAMETER; - /* A process exists until all handles to it close. Has it exited? */ - ret = WaitForSingleObject(h, 0) != 0; - CloseHandle(h); - } - return ret; -#else - for (;;) { - int rc; - struct flock lock_info; - memset(&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_WRLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = pid; - lock_info.l_len = 1; - if ((rc = fcntl(env->me_lfd, op, &lock_info)) == 0) { - if (op == F_GETLK && lock_info.l_type != F_UNLCK) - rc = -1; - } else if ((rc = ErrCode()) == EINTR) { - continue; - } - return rc; - } -#endif -} - -/** Common code for #mdb_txn_begin() and #mdb_txn_renew(). - * @param[in] txn the transaction handle to initialize - * @return 0 on success, non-zero on failure. - */ -static int -mdb_txn_renew0(MDB_txn *txn) -{ - MDB_env *env = txn->mt_env; - MDB_txninfo *ti = env->me_txns; - MDB_meta *meta; - unsigned int i, nr; - uint16_t x; - int rc, new_notls = 0; - - /* Setup db info */ - txn->mt_numdbs = env->me_numdbs; - txn->mt_dbxs = env->me_dbxs; /* mostly static anyway */ - - if (txn->mt_flags & MDB_TXN_RDONLY) { - if (!ti) { - meta = env->me_metas[ mdb_env_pick_meta(env) ]; - txn->mt_txnid = meta->mm_txnid; - txn->mt_u.reader = NULL; - } else { - MDB_reader *r = (env->me_flags & MDB_NOTLS) ? 
-			txn->mt_u.reader :
-			pthread_getspecific(env->me_txkey);
-		if (r) {
-			if (r->mr_pid != env->me_pid || r->mr_txnid != (txnid_t)-1)
-				return MDB_BAD_RSLOT;
-		} else {
-			MDB_PID_T pid = env->me_pid;
-			MDB_THR_T tid = pthread_self();
-
-			if (!env->me_live_reader) {
-				rc = mdb_reader_pid(env, Pidset, pid);
-				if (rc)
-					return rc;
-				env->me_live_reader = 1;
-			}
-
-			LOCK_MUTEX_R(env);
-			nr = ti->mti_numreaders;
-			for (i=0; i<nr; i++)
-				if (ti->mti_readers[i].mr_pid == 0)
-					break;
-			if (i == env->me_maxreaders) {
-				UNLOCK_MUTEX_R(env);
-				return MDB_READERS_FULL;
-			}
-			ti->mti_readers[i].mr_pid = pid;
-			ti->mti_readers[i].mr_tid = tid;
-			if (i == nr)
-				ti->mti_numreaders = ++nr;
-			/* Save numreaders for un-mutexed mdb_env_close() */
-			env->me_numreaders = nr;
-			UNLOCK_MUTEX_R(env);
-
-			r = &ti->mti_readers[i];
-			new_notls = (env->me_flags & MDB_NOTLS);
-			if (!new_notls && (rc=pthread_setspecific(env->me_txkey, r))) {
-				r->mr_pid = 0;
-				return rc;
-			}
-		}
-		txn->mt_txnid = r->mr_txnid = ti->mti_txnid;
-		txn->mt_u.reader = r;
-		meta = env->me_metas[txn->mt_txnid & 1];
-		}
-	} else {
-		if (ti) {
-			LOCK_MUTEX_W(env);
-
-			txn->mt_txnid = ti->mti_txnid;
-			meta = env->me_metas[txn->mt_txnid & 1];
-		} else {
-			meta = env->me_metas[ mdb_env_pick_meta(env) ];
-			txn->mt_txnid = meta->mm_txnid;
-		}
-		txn->mt_txnid++;
-#if MDB_DEBUG
-		if (txn->mt_txnid == mdb_debug_start)
-			mdb_debug = 1;
-#endif
-		txn->mt_dirty_room = MDB_IDL_UM_MAX;
-		txn->mt_u.dirty_list = env->me_dirty_list;
-		txn->mt_u.dirty_list[0].mid = 0;
-		txn->mt_free_pgs = env->me_free_pgs;
-		txn->mt_free_pgs[0] = 0;
-		txn->mt_spill_pgs = NULL;
-		env->me_txn = txn;
-		memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned int));
-	}
-
-	/* Copy the DB info and flags */
-	memcpy(txn->mt_dbs, meta->mm_dbs, 2 * sizeof(MDB_db));
-
-	/* Moved to here to avoid a data race in read TXNs */
-	txn->mt_next_pgno = meta->mm_last_pg+1;
-
-	for (i=2; i<txn->mt_numdbs; i++) {
-		x = env->me_dbflags[i];
-		txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS;
-		txn->mt_dbflags[i] = (x & MDB_VALID) ? DB_VALID|DB_STALE : 0;
-	}
-	txn->mt_dbflags[0] = txn->mt_dbflags[1] = DB_VALID;
-
-	if (env->me_maxpg < txn->mt_next_pgno) {
-		mdb_txn_reset0(txn, "renew0-mapfail");
-		if (new_notls) {
-			txn->mt_u.reader->mr_pid = 0;
-			txn->mt_u.reader = NULL;
-		}
-		return MDB_MAP_RESIZED;
-	}
-
-	return MDB_SUCCESS;
-}
-
-int
-mdb_txn_renew(MDB_txn *txn)
-{
-	int rc;
-
-	if (!txn || txn->mt_dbxs)	/* A reset txn has mt_dbxs==NULL */
-		return EINVAL;
-
-	if (txn->mt_env->me_flags & MDB_FATAL_ERROR) {
-		DPUTS("environment had fatal error, must shutdown!");
-		return MDB_PANIC;
-	}
-
-	rc = mdb_txn_renew0(txn);
-	if (rc == MDB_SUCCESS) {
-		DPRINTF(("renew txn %"Z"u%c %p on mdbenv %p, root page %"Z"u",
-			txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w',
-			(void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root));
-	}
-	return rc;
-}
-
-int
-mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret)
-{
-	MDB_txn *txn;
-	MDB_ntxn *ntxn;
-	int rc, size, tsize = sizeof(MDB_txn);
-
-	if (env->me_flags & MDB_FATAL_ERROR) {
-		DPUTS("environment had fatal error, must shutdown!");
-		return MDB_PANIC;
-	}
-	if ((env->me_flags & MDB_RDONLY) && !(flags & MDB_RDONLY))
-		return EACCES;
-	if (parent) {
-		/* Nested transactions: Max 1 child, write txns only, no writemap */
-		if (parent->mt_child ||
-			(flags & MDB_RDONLY) ||
-			(parent->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_ERROR)) ||
-			(env->me_flags & MDB_WRITEMAP))
-		{
-			return (parent->mt_flags & MDB_TXN_RDONLY) ?
-				EINVAL : MDB_BAD_TXN;
-		}
-		tsize = sizeof(MDB_ntxn);
-	}
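-	/* Illustration (typical caller usage, not code from this file): the
-	 * checks above allow at most one child per write txn. A sketch:
-	 *
-	 *	MDB_txn *parent, *child;
-	 *	mdb_txn_begin(env, NULL, 0, &parent);
-	 *	mdb_txn_begin(env, parent, 0, &child);
-	 *	// ... changes made in child ...
-	 *	if (ok)
-	 *		mdb_txn_commit(child);	// folds child's work into parent
-	 *	else
-	 *		mdb_txn_abort(child);	// parent continues unaffected
-	 *	mdb_txn_commit(parent);
-	 *
-	 * Error handling and the meaning of "ok" are omitted for brevity.
-	 */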
-	size = tsize + env->me_maxdbs * (sizeof(MDB_db)+1);
-	if (!(flags & MDB_RDONLY)) {
-		size += env->me_maxdbs * sizeof(MDB_cursor *);
-		/* child txns use parent's dbiseqs */
-		if (!parent)
-			size += env->me_maxdbs * sizeof(unsigned int);
-	}
-
-	if ((txn = calloc(1, size)) == NULL) {
-		DPRINTF(("calloc: %s", strerror(errno)));
-		return ENOMEM;
-	}
-	txn->mt_dbs = (MDB_db *) ((char *)txn + tsize);
-	if (flags & MDB_RDONLY) {
-		txn->mt_flags |= MDB_TXN_RDONLY;
-		txn->mt_dbflags = (unsigned char *)(txn->mt_dbs + env->me_maxdbs);
-		txn->mt_dbiseqs = env->me_dbiseqs;
-	} else {
-		txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
-		if (parent) {
-			txn->mt_dbiseqs = parent->mt_dbiseqs;
-			txn->mt_dbflags = (unsigned char *)(txn->mt_cursors + env->me_maxdbs);
-		} else {
-			txn->mt_dbiseqs = (unsigned int *)(txn->mt_cursors + env->me_maxdbs);
-			txn->mt_dbflags = (unsigned char *)(txn->mt_dbiseqs + env->me_maxdbs);
-		}
-	}
-	txn->mt_env = env;
-
-	if (parent) {
-		unsigned int i;
-		txn->mt_u.dirty_list = malloc(sizeof(MDB_ID2)*MDB_IDL_UM_SIZE);
-		if (!txn->mt_u.dirty_list ||
-			!(txn->mt_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)))
-		{
-			free(txn->mt_u.dirty_list);
-			free(txn);
-			return ENOMEM;
-		}
-		txn->mt_txnid = parent->mt_txnid;
-		txn->mt_dirty_room = parent->mt_dirty_room;
-		txn->mt_u.dirty_list[0].mid = 0;
-		txn->mt_spill_pgs = NULL;
-		txn->mt_next_pgno = parent->mt_next_pgno;
-		parent->mt_child = txn;
-		txn->mt_parent = parent;
-		txn->mt_numdbs = parent->mt_numdbs;
-		txn->mt_flags = parent->mt_flags;
-		txn->mt_dbxs = parent->mt_dbxs;
-		memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
-		/* Copy parent's mt_dbflags, but clear DB_NEW */
-		for (i=0; i<txn->mt_numdbs; i++)
-			txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW;
-		rc = 0;
-		ntxn = (MDB_ntxn *)txn;
-		ntxn->mnt_pgstate = env->me_pgstate; /* save parent me_pghead & co */
-		if (env->me_pghead) {
-			size = MDB_IDL_SIZEOF(env->me_pghead);
-			env->me_pghead = mdb_midl_alloc(env->me_pghead[0]);
-			if (env->me_pghead)
-				memcpy(env->me_pghead, ntxn->mnt_pgstate.mf_pghead, size);
-			else
-				rc = ENOMEM;
-		}
-		if (!rc)
-			rc = mdb_cursor_shadow(parent, txn);
-		if (rc)
-			mdb_txn_reset0(txn, "beginchild-fail");
-	} else {
-		rc = mdb_txn_renew0(txn);
-	}
-	if (rc)
-		free(txn);
-	else {
-		*ret = txn;
-		DPRINTF(("begin txn %"Z"u%c %p on mdbenv %p, root page %"Z"u",
-			txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w',
-			(void *) txn, (void *) env, txn->mt_dbs[MAIN_DBI].md_root));
-	}
-
-	return rc;
-}
-
-MDB_env *
-mdb_txn_env(MDB_txn *txn)
-{
-	if(!txn) return NULL;
-	return txn->mt_env;
-}
-
-/** Export or close DBI handles opened in this txn. */
-static void
-mdb_dbis_update(MDB_txn *txn, int keep)
-{
-	int i;
-	MDB_dbi n = txn->mt_numdbs;
-	MDB_env *env = txn->mt_env;
-	unsigned char *tdbflags = txn->mt_dbflags;
-
-	for (i = n; --i >= 2;) {
-		if (tdbflags[i] & DB_NEW) {
-			if (keep) {
-				env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDB_VALID;
-			} else {
-				char *ptr = env->me_dbxs[i].md_name.mv_data;
-				if (ptr) {
-					env->me_dbxs[i].md_name.mv_data = NULL;
-					env->me_dbxs[i].md_name.mv_size = 0;
-					env->me_dbflags[i] = 0;
-					env->me_dbiseqs[i]++;
-					free(ptr);
-				}
-			}
-		}
-	}
-	if (keep && env->me_numdbs < n)
-		env->me_numdbs = n;
-}
-
-/** Common code for #mdb_txn_reset() and #mdb_txn_abort().
- * May be called twice for readonly txns: First reset it, then abort.
- * @param[in] txn the transaction handle to reset - * @param[in] act why the transaction is being reset - */ -static void -mdb_txn_reset0(MDB_txn *txn, const char *act) -{ - MDB_env *env = txn->mt_env; - - /* Close any DBI handles opened in this txn */ - mdb_dbis_update(txn, 0); - - DPRINTF(("%s txn %"Z"u%c %p on mdbenv %p, root page %"Z"u", - act, txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w', - (void *) txn, (void *)env, txn->mt_dbs[MAIN_DBI].md_root)); - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { - if (txn->mt_u.reader) { - txn->mt_u.reader->mr_txnid = (txnid_t)-1; - if (!(env->me_flags & MDB_NOTLS)) - txn->mt_u.reader = NULL; /* txn does not own reader */ - } - txn->mt_numdbs = 0; /* close nothing if called again */ - txn->mt_dbxs = NULL; /* mark txn as reset */ - } else { - mdb_cursors_close(txn, 0); - - if (!(env->me_flags & MDB_WRITEMAP)) { - mdb_dlist_free(txn); - } - mdb_midl_free(env->me_pghead); - - if (txn->mt_parent) { - txn->mt_parent->mt_child = NULL; - env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate; - mdb_midl_free(txn->mt_free_pgs); - mdb_midl_free(txn->mt_spill_pgs); - free(txn->mt_u.dirty_list); - return; - } - - if (mdb_midl_shrink(&txn->mt_free_pgs)) - env->me_free_pgs = txn->mt_free_pgs; - env->me_pghead = NULL; - env->me_pglast = 0; - - env->me_txn = NULL; - /* The writer mutex was locked in mdb_txn_begin. */ - if (env->me_txns) - UNLOCK_MUTEX_W(env); - } -} - -void -mdb_txn_reset(MDB_txn *txn) -{ - if (txn == NULL) - return; - - /* This call is only valid for read-only txns */ - if (!(txn->mt_flags & MDB_TXN_RDONLY)) - return; - - mdb_txn_reset0(txn, "reset"); -} - -void -mdb_txn_abort(MDB_txn *txn) -{ - if (txn == NULL) - return; - - if (txn->mt_child) - mdb_txn_abort(txn->mt_child); - - mdb_txn_reset0(txn, "abort"); - /* Free reader slot tied to this txn (if MDB_NOTLS && writable FS) */ - if ((txn->mt_flags & MDB_TXN_RDONLY) && txn->mt_u.reader) - txn->mt_u.reader->mr_pid = 0; - - free(txn); -} - -/** Save the freelist as of this transaction to the freeDB. - * This changes the freelist. Keep trying until it stabilizes. - */ -static int -mdb_freelist_save(MDB_txn *txn) -{ - /* env->me_pghead[] can grow and shrink during this call. - * env->me_pglast and txn->mt_free_pgs[] can only grow. - * Page numbers cannot disappear from txn->mt_free_pgs[]. - */ - MDB_cursor mc; - MDB_env *env = txn->mt_env; - int rc, maxfree_1pg = env->me_maxfree_1pg, more = 1; - txnid_t pglast = 0, head_id = 0; - pgno_t freecnt = 0, *free_pgs, *mop; - ssize_t head_room = 0, total_room = 0, mop_len, clean_limit; - - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - - if (env->me_pghead) { - /* Make sure first page of freeDB is touched and on freelist */ - rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST|MDB_PS_MODIFY); - if (rc && rc != MDB_NOTFOUND) - return rc; - } - - if (!env->me_pghead && txn->mt_loose_pgs) { - /* Put loose page numbers in mt_free_pgs, since - * we may be unable to return them to me_pghead. - */ - MDB_page *mp = txn->mt_loose_pgs; - if ((rc = mdb_midl_need(&txn->mt_free_pgs, txn->mt_loose_count)) != 0) - return rc; - for (; mp; mp = NEXT_LOOSE_PAGE(mp)) - mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - } - - /* MDB_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */ - clean_limit = (env->me_flags & (MDB_NOMEMINIT|MDB_WRITEMAP)) - ? 
SSIZE_MAX : maxfree_1pg; - - for (;;) { - /* Come back here after each Put() in case freelist changed */ - MDB_val key, data; - pgno_t *pgs; - ssize_t j; - - /* If using records from freeDB which we have not yet - * deleted, delete them and any we reserved for me_pghead. - */ - while (pglast < env->me_pglast) { - rc = mdb_cursor_first(&mc, &key, NULL); - if (rc) - return rc; - pglast = head_id = *(txnid_t *)key.mv_data; - total_room = head_room = 0; - mdb_tassert(txn, pglast <= env->me_pglast); - rc = mdb_cursor_del(&mc, 0); - if (rc) - return rc; - } - - /* Save the IDL of pages freed by this txn, to a single record */ - if (freecnt < txn->mt_free_pgs[0]) { - if (!freecnt) { - /* Make sure last page of freeDB is touched and on freelist */ - rc = mdb_page_search(&mc, NULL, MDB_PS_LAST|MDB_PS_MODIFY); - if (rc && rc != MDB_NOTFOUND) - return rc; - } - free_pgs = txn->mt_free_pgs; - /* Write to last page of freeDB */ - key.mv_size = sizeof(txn->mt_txnid); - key.mv_data = &txn->mt_txnid; - do { - freecnt = free_pgs[0]; - data.mv_size = MDB_IDL_SIZEOF(free_pgs); - rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); - if (rc) - return rc; - /* Retry if mt_free_pgs[] grew during the Put() */ - free_pgs = txn->mt_free_pgs; - } while (freecnt < free_pgs[0]); - mdb_midl_sort(free_pgs); - memcpy(data.mv_data, free_pgs, data.mv_size); -#if (MDB_DEBUG) > 1 - { - unsigned int i = free_pgs[0]; - DPRINTF(("IDL write txn %"Z"u root %"Z"u num %u", - txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i)); - for (; i; i--) - DPRINTF(("IDL %"Z"u", free_pgs[i])); - } -#endif - continue; - } - - mop = env->me_pghead; - mop_len = (mop ? mop[0] : 0) + txn->mt_loose_count; - - /* Reserve records for me_pghead[]. Split it if multi-page, - * to avoid searching freeDB for a page range. Use keys in - * range [1,me_pglast]: Smaller than txnid of oldest reader. - */ - if (total_room >= mop_len) { - if (total_room == mop_len || --more < 0) - break; - } else if (head_room >= maxfree_1pg && head_id > 1) { - /* Keep current record (overflow page), add a new one */ - head_id--; - head_room = 0; - } - /* (Re)write {key = head_id, IDL length = head_room} */ - total_room -= head_room; - head_room = mop_len - total_room; - if (head_room > maxfree_1pg && head_id > 1) { - /* Overflow multi-page for part of me_pghead */ - head_room /= head_id; /* amortize page sizes */ - head_room += maxfree_1pg - head_room % (maxfree_1pg + 1); - } else if (head_room < 0) { - /* Rare case, not bothering to delete this record */ - head_room = 0; - } - key.mv_size = sizeof(head_id); - key.mv_data = &head_id; - data.mv_size = (head_room + 1) * sizeof(pgno_t); - rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); - if (rc) - return rc; - /* IDL is initially empty, zero out at least the length */ - pgs = (pgno_t *)data.mv_data; - j = head_room > clean_limit ? head_room : 0; - do { - pgs[j] = 0; - } while (--j >= 0); - total_room += head_room; - } - - /* Return loose page numbers to me_pghead, though usually none are - * left at this point. The pages themselves remain in dirty_list. 
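-	 * (For illustration: an MDB_IDL, defined in midl.h, is a counted
-	 *  array of page numbers. Slot 0 holds the count and the IDs follow,
-	 *  which is why loose[0], mop[0] and free_pgs[0] are read and
-	 *  written as lengths throughout this function:
-	 *
-	 *	MDB_ID idl[1+3];
-	 *	idl[0] = 3;				count of IDs present
-	 *	idl[1] = 9; idl[2] = 7; idl[3] = 4;	the IDs themselves
-	 *
-	 *  mdb_midl_sort() keeps a list ordered so mdb_midl_xmerge() can
-	 *  splice two lists together in a single pass.)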
- */ - if (txn->mt_loose_pgs) { - MDB_page *mp = txn->mt_loose_pgs; - unsigned count = txn->mt_loose_count; - MDB_IDL loose; - /* Room for loose pages + temp IDL with same */ - if ((rc = mdb_midl_need(&env->me_pghead, 2*count+1)) != 0) - return rc; - mop = env->me_pghead; - loose = mop + MDB_IDL_ALLOCLEN(mop) - count; - for (count = 0; mp; mp = NEXT_LOOSE_PAGE(mp)) - loose[ ++count ] = mp->mp_pgno; - loose[0] = count; - mdb_midl_sort(loose); - mdb_midl_xmerge(mop, loose); - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - mop_len = mop[0]; - } - - /* Fill in the reserved me_pghead records */ - rc = MDB_SUCCESS; - if (mop_len) { - MDB_val key, data; - - mop += mop_len; - rc = mdb_cursor_first(&mc, &key, &data); - for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) { - txnid_t id = *(txnid_t *)key.mv_data; - ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1; - MDB_ID save; - - mdb_tassert(txn, len >= 0 && id <= env->me_pglast); - key.mv_data = &id; - if (len > mop_len) { - len = mop_len; - data.mv_size = (len + 1) * sizeof(MDB_ID); - } - data.mv_data = mop -= len; - save = mop[0]; - mop[0] = len; - rc = mdb_cursor_put(&mc, &key, &data, MDB_CURRENT); - mop[0] = save; - if (rc || !(mop_len -= len)) - break; - } - } - return rc; -} - -/** Flush (some) dirty pages to the map, after clearing their dirty flag. - * @param[in] txn the transaction that's being committed - * @param[in] keep number of initial pages in dirty_list to keep dirty. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_flush(MDB_txn *txn, int keep) -{ - MDB_env *env = txn->mt_env; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned psize = env->me_psize, j; - int i, pagecount = dl[0].mid, rc; - size_t size = 0, pos = 0; - pgno_t pgno = 0; - MDB_page *dp = NULL; -#ifdef _WIN32 - OVERLAPPED ov; -#else - struct iovec iov[MDB_COMMIT_PAGES]; - ssize_t wpos = 0, wsize = 0, wres; - size_t next_pos = 1; /* impossible pos, so pos != next_pos */ - int n = 0; -#endif - - j = i = keep; - - if (env->me_flags & MDB_WRITEMAP) { - /* Clear dirty flags */ - while (++i <= pagecount) { - dp = dl[i].mptr; - /* Don't flush this page yet */ - if (dp->mp_flags & (P_LOOSE|P_KEEP)) { - dp->mp_flags &= ~P_KEEP; - dl[++j] = dl[i]; - continue; - } - dp->mp_flags &= ~P_DIRTY; - } - goto done; - } - - /* Write the pages */ - for (;;) { - if (++i <= pagecount) { - dp = dl[i].mptr; - /* Don't flush this page yet */ - if (dp->mp_flags & (P_LOOSE|P_KEEP)) { - dp->mp_flags &= ~P_KEEP; - dl[i].mid = 0; - continue; - } - pgno = dl[i].mid; - /* clear dirty flag */ - dp->mp_flags &= ~P_DIRTY; - pos = pgno * psize; - size = psize; - if (IS_OVERFLOW(dp)) size *= dp->mp_pages; - } -#ifdef _WIN32 - else break; - - /* Windows actually supports scatter/gather I/O, but only on - * unbuffered file handles. Since we're relying on the OS page - * cache for all our data, that's self-defeating. So we just - * write pages one at a time. We use the ov structure to set - * the write offset, to at least save the overhead of a Seek - * system call. - */ - DPRINTF(("committing page %"Z"u", pgno)); - memset(&ov, 0, sizeof(ov)); - ov.Offset = pos & 0xffffffff; - ov.OffsetHigh = pos >> 16 >> 16; - if (!WriteFile(env->me_fd, dp, size, NULL, &ov)) { - rc = ErrCode(); - DPRINTF(("WriteFile: %d", rc)); - return rc; - } -#else - /* Write up to MDB_COMMIT_PAGES dirty pages at a time. 
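-		 * An illustrative standalone sketch of this gather-write
-		 * pattern (plain POSIX; the helpers more_chunks() and
-		 * next_chunk() are hypothetical stand-ins for walking the
-		 * dirty list in ascending offset order):
-		 *
-		 *	struct iovec iov[16];
-		 *	int n = 0;
-		 *	off_t wpos = 0, next = -1;
-		 *	while (more_chunks()) {
-		 *		off_t pos; void *buf; size_t len;
-		 *		next_chunk(&pos, &buf, &len);
-		 *		if (pos != next || n == 16) {	// run broken, or iovec full
-		 *			if (n > 0 && pwritev(fd, iov, n, wpos) < 0)
-		 *				return -1;
-		 *			n = 0;
-		 *			wpos = pos;		// start a new run here
-		 *		}
-		 *		iov[n].iov_base = buf;
-		 *		iov[n].iov_len = len;
-		 *		next = pos + (off_t)len;
-		 *		n++;
-		 *	}
-		 *	if (n > 0 && pwritev(fd, iov, n, wpos) < 0)	// final batch
-		 *		return -1;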
*/ - if (pos!=next_pos || n==MDB_COMMIT_PAGES || wsize+size>MAX_WRITE) { - if (n) { - /* Write previous page(s) */ -#ifdef MDB_USE_PWRITEV - wres = pwritev(env->me_fd, iov, n, wpos); -#else - if (n == 1) { - wres = pwrite(env->me_fd, iov[0].iov_base, wsize, wpos); - } else { - if (lseek(env->me_fd, wpos, SEEK_SET) == -1) { - rc = ErrCode(); - DPRINTF(("lseek: %s", strerror(rc))); - return rc; - } - wres = writev(env->me_fd, iov, n); - } -#endif - if (wres != wsize) { - if (wres < 0) { - rc = ErrCode(); - DPRINTF(("Write error: %s", strerror(rc))); - } else { - rc = EIO; /* TODO: Use which error code? */ - DPUTS("short write, filesystem full?"); - } - return rc; - } - n = 0; - } - if (i > pagecount) - break; - wpos = pos; - wsize = 0; - } - DPRINTF(("committing page %"Z"u", pgno)); - next_pos = pos + size; - iov[n].iov_len = size; - iov[n].iov_base = (char *)dp; - wsize += size; - n++; -#endif /* _WIN32 */ - } - - /* MIPS has cache coherency issues, this is a no-op everywhere else - * Note: for any size >= on-chip cache size, entire on-chip cache is - * flushed. - */ - CACHEFLUSH(env->me_map, txn->mt_next_pgno * env->me_psize, DCACHE); - - for (i = keep; ++i <= pagecount; ) { - dp = dl[i].mptr; - /* This is a page we skipped above */ - if (!dl[i].mid) { - dl[++j] = dl[i]; - dl[j].mid = dp->mp_pgno; - continue; - } - mdb_dpage_free(env, dp); - } - -done: - i--; - txn->mt_dirty_room += i - j; - dl[0].mid = j; - return MDB_SUCCESS; -} - -int -mdb_txn_commit(MDB_txn *txn) -{ - int rc; - unsigned int i; - MDB_env *env; - - if (txn == NULL || txn->mt_env == NULL) - return EINVAL; - - if (txn->mt_child) { - rc = mdb_txn_commit(txn->mt_child); - txn->mt_child = NULL; - if (rc) - goto fail; - } - - env = txn->mt_env; - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { - mdb_dbis_update(txn, 1); - txn->mt_numdbs = 2; /* so txn_abort() doesn't close any new handles */ - mdb_txn_abort(txn); - return MDB_SUCCESS; - } - - if (F_ISSET(txn->mt_flags, MDB_TXN_ERROR)) { - DPUTS("error flag is set, can't commit"); - if (txn->mt_parent) - txn->mt_parent->mt_flags |= MDB_TXN_ERROR; - rc = MDB_BAD_TXN; - goto fail; - } - - if (txn->mt_parent) { - MDB_txn *parent = txn->mt_parent; - MDB_page **lp; - MDB_ID2L dst, src; - MDB_IDL pspill; - unsigned x, y, len, ps_len; - - /* Append our free list to parent's */ - rc = mdb_midl_append_list(&parent->mt_free_pgs, txn->mt_free_pgs); - if (rc) - goto fail; - mdb_midl_free(txn->mt_free_pgs); - /* Failures after this must either undo the changes - * to the parent or set MDB_TXN_ERROR in the parent. - */ - - parent->mt_next_pgno = txn->mt_next_pgno; - parent->mt_flags = txn->mt_flags; - - /* Merge our cursors into parent's and close them */ - mdb_cursors_close(txn, 1); - - /* Update parent's DB table. 
-	 */
-	memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
-	parent->mt_numdbs = txn->mt_numdbs;
-	parent->mt_dbflags[0] = txn->mt_dbflags[0];
-	parent->mt_dbflags[1] = txn->mt_dbflags[1];
-	for (i=2; i<txn->mt_numdbs; i++) {
-		/* preserve parent's DB_NEW status */
-		x = parent->mt_dbflags[i] & DB_NEW;
-		parent->mt_dbflags[i] = txn->mt_dbflags[i] | x;
-	}
-
-	dst = parent->mt_u.dirty_list;
-	src = txn->mt_u.dirty_list;
-	/* Remove anything in our dirty list from parent's spill list */
-	if ((pspill = parent->mt_spill_pgs) && (ps_len = pspill[0])) {
-		x = y = ps_len;
-		pspill[0] = (pgno_t)-1;
-		/* Mark our dirty pages as deleted in parent spill list */
-		for (i=0, len=src[0].mid; ++i <= len; ) {
-			MDB_ID pn = src[i].mid << 1;
-			while (pn > pspill[x])
-				x--;
-			if (pn == pspill[x]) {
-				pspill[x] = 1;
-				y = --x;
-			}
-		}
-		/* Squash deleted pagenums if we deleted any */
-		for (x=y; ++x <= ps_len; )
-			if (!(pspill[x] & 1))
-				pspill[++y] = pspill[x];
-		pspill[0] = y;
-	}
-
-	/* Find len = length of merging our dirty list with parent's */
-	x = dst[0].mid;
-	dst[0].mid = 0;		/* simplify loops */
-	if (parent->mt_parent) {
-		len = x + src[0].mid;
-		y = mdb_mid2l_search(src, dst[x].mid + 1) - 1;
-		for (i = x; y && i; y--) {
-			pgno_t yp = src[y].mid;
-			while (yp < dst[i].mid)
-				i--;
-			if (yp == dst[i].mid) {
-				i--;
-				len--;
-			}
-		}
-	} else { /* Simplify the above for single-ancestor case */
-		len = MDB_IDL_UM_MAX - txn->mt_dirty_room;
-	}
-	/* Merge our dirty list with parent's */
-	y = src[0].mid;
-	for (i = len; y; dst[i--] = src[y--]) {
-		pgno_t yp = src[y].mid;
-		while (yp < dst[x].mid)
-			dst[i--] = dst[x--];
-		if (yp == dst[x].mid)
-			free(dst[x--].mptr);
-	}
-	mdb_tassert(txn, i == x);
-	dst[0].mid = len;
-	free(txn->mt_u.dirty_list);
-	parent->mt_dirty_room = txn->mt_dirty_room;
-	if (txn->mt_spill_pgs) {
-		if (parent->mt_spill_pgs) {
-			/* TODO: Prevent failure here, so parent does not fail */
-			rc = mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs);
-			if (rc)
-				parent->mt_flags |= MDB_TXN_ERROR;
-			mdb_midl_free(txn->mt_spill_pgs);
-			mdb_midl_sort(parent->mt_spill_pgs);
-		} else {
-			parent->mt_spill_pgs = txn->mt_spill_pgs;
-		}
-	}
-
-	/* Append our loose page list to parent's */
-	for (lp = &parent->mt_loose_pgs; *lp; lp = &NEXT_LOOSE_PAGE(*lp))
-		;
-	*lp = txn->mt_loose_pgs;
-	parent->mt_loose_count += txn->mt_loose_count;
-
-	parent->mt_child = NULL;
-	mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead);
-	free(txn);
-	return rc;
-	}
-
-	if (txn != env->me_txn) {
-		DPUTS("attempt to commit unknown transaction");
-		rc = EINVAL;
-		goto fail;
-	}
-
-	mdb_cursors_close(txn, 0);
-
-	if (!txn->mt_u.dirty_list[0].mid &&
-		!(txn->mt_flags & (MDB_TXN_DIRTY|MDB_TXN_SPILLS)))
-		goto done;
-
-	DPRINTF(("committing txn %"Z"u %p on mdbenv %p, root page %"Z"u",
-		txn->mt_txnid, (void*)txn, (void*)env, txn->mt_dbs[MAIN_DBI].md_root));
-
-	/* Update DB root pointers */
-	if (txn->mt_numdbs > 2) {
-		MDB_cursor mc;
-		MDB_dbi i;
-		MDB_val data;
-		data.mv_size = sizeof(MDB_db);
-
-		mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
-		for (i = 2; i < txn->mt_numdbs; i++) {
-			if (txn->mt_dbflags[i] & DB_DIRTY) {
-				if (TXN_DBI_CHANGED(txn, i)) {
-					rc = MDB_BAD_DBI;
-					goto fail;
-				}
-				data.mv_data = &txn->mt_dbs[i];
-				rc = mdb_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data, 0);
-				if (rc)
-					goto fail;
-			}
-		}
-	}
-
-	rc = mdb_freelist_save(txn);
-	if (rc)
-		goto fail;
-
-	mdb_midl_free(env->me_pghead);
-	env->me_pghead = NULL;
-	if (mdb_midl_shrink(&txn->mt_free_pgs))
-		env->me_free_pgs =
txn->mt_free_pgs; - -#if (MDB_DEBUG) > 2 - mdb_audit(txn); -#endif - - if ((rc = mdb_page_flush(txn, 0)) || - (rc = mdb_env_sync(env, 0)) || - (rc = mdb_env_write_meta(txn))) - goto fail; - - /* Free P_LOOSE pages left behind in dirty_list */ - if (!(env->me_flags & MDB_WRITEMAP)) - mdb_dlist_free(txn); - -done: - env->me_pglast = 0; - env->me_txn = NULL; - mdb_dbis_update(txn, 1); - - if (env->me_txns) - UNLOCK_MUTEX_W(env); - free(txn); - - return MDB_SUCCESS; - -fail: - mdb_txn_abort(txn); - return rc; -} - -/** Read the environment parameters of a DB environment before - * mapping it into memory. - * @param[in] env the environment handle - * @param[out] meta address of where to store the meta information - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_read_header(MDB_env *env, MDB_meta *meta) -{ - MDB_metabuf pbuf; - MDB_page *p; - MDB_meta *m; - int i, rc, off; - enum { Size = sizeof(pbuf) }; - - /* We don't know the page size yet, so use a minimum value. - * Read both meta pages so we can use the latest one. - */ - - for (i=off=0; i<2; i++, off = meta->mm_psize) { -#ifdef _WIN32 - DWORD len; - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - rc = ReadFile(env->me_fd, &pbuf, Size, &len, &ov) ? (int)len : -1; - if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF) - rc = 0; -#else - rc = pread(env->me_fd, &pbuf, Size, off); -#endif - if (rc != Size) { - if (rc == 0 && off == 0) - return ENOENT; - rc = rc < 0 ? (int) ErrCode() : MDB_INVALID; - DPRINTF(("read: %s", mdb_strerror(rc))); - return rc; - } - - p = (MDB_page *)&pbuf; - - if (!F_ISSET(p->mp_flags, P_META)) { - DPRINTF(("page %"Z"u not a meta page", p->mp_pgno)); - return MDB_INVALID; - } - - m = METADATA(p); - if (m->mm_magic != MDB_MAGIC) { - DPUTS("meta has invalid magic"); - return MDB_INVALID; - } - - if (m->mm_version != MDB_DATA_VERSION) { - DPRINTF(("database is version %u, expected version %u", - m->mm_version, MDB_DATA_VERSION)); - return MDB_VERSION_MISMATCH; - } - - if (off == 0 || m->mm_txnid > meta->mm_txnid) - *meta = *m; - } - return 0; -} - -static void ESECT -mdb_env_init_meta0(MDB_env *env, MDB_meta *meta) -{ - meta->mm_magic = MDB_MAGIC; - meta->mm_version = MDB_DATA_VERSION; - meta->mm_mapsize = env->me_mapsize; - meta->mm_psize = env->me_psize; - meta->mm_last_pg = 1; - meta->mm_flags = env->me_flags & 0xffff; - meta->mm_flags |= MDB_INTEGERKEY; - meta->mm_dbs[0].md_root = P_INVALID; - meta->mm_dbs[1].md_root = P_INVALID; -} - -/** Write the environment parameters of a freshly created DB environment. - * @param[in] env the environment handle - * @param[out] meta address of where to store the meta information - * @return 0 on success, non-zero on failure. 
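- *
- * For illustration (behavior evident from mdb_env_read_header() above and
- * mdb_env_write_meta() above it; m0/m1 stand in for the two mapped
- * MDB_meta pointers): the two meta pages double-buffer commits. Readers
- * take the one with the larger mm_txnid, while a committing txn rewrites
- * the older one, chosen by txnid parity:
- *
- *	MDB_meta *newer = (m0->mm_txnid < m1->mm_txnid) ? m1 : m0;
- *	int toggle = (int)(txnid & 1);	// meta slot the next commit rewrites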
- */ -static int ESECT -mdb_env_init_meta(MDB_env *env, MDB_meta *meta) -{ - MDB_page *p, *q; - int rc; - unsigned int psize; -#ifdef _WIN32 - DWORD len; - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); -#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ - ov.Offset = pos; \ - rc = WriteFile(fd, ptr, size, &len, &ov); } while(0) -#else - int len; -#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ - len = pwrite(fd, ptr, size, pos); \ - rc = (len >= 0); } while(0) -#endif - - DPUTS("writing new meta page"); - - psize = env->me_psize; - - mdb_env_init_meta0(env, meta); - - p = calloc(2, psize); - p->mp_pgno = 0; - p->mp_flags = P_META; - *(MDB_meta *)METADATA(p) = *meta; - - q = (MDB_page *)((char *)p + psize); - q->mp_pgno = 1; - q->mp_flags = P_META; - *(MDB_meta *)METADATA(q) = *meta; - - DO_PWRITE(rc, env->me_fd, p, psize * 2, len, 0); - if (!rc) - rc = ErrCode(); - else if ((unsigned) len == psize * 2) - rc = MDB_SUCCESS; - else - rc = ENOSPC; - free(p); - return rc; -} - -/** Update the environment info to commit a transaction. - * @param[in] txn the transaction that's being committed - * @return 0 on success, non-zero on failure. - */ -static int -mdb_env_write_meta(MDB_txn *txn) -{ - MDB_env *env; - MDB_meta meta, metab, *mp; - size_t mapsize; - off_t off; - int rc, len, toggle; - char *ptr; - HANDLE mfd; -#ifdef _WIN32 - OVERLAPPED ov; -#else - int r2; -#endif - - toggle = txn->mt_txnid & 1; - DPRINTF(("writing meta page %d for root page %"Z"u", - toggle, txn->mt_dbs[MAIN_DBI].md_root)); - - env = txn->mt_env; - mp = env->me_metas[toggle]; - mapsize = env->me_metas[toggle ^ 1]->mm_mapsize; - /* Persist any increases of mapsize config */ - if (mapsize < env->me_mapsize) - mapsize = env->me_mapsize; - - if (env->me_flags & MDB_WRITEMAP) { - mp->mm_mapsize = mapsize; - mp->mm_dbs[0] = txn->mt_dbs[0]; - mp->mm_dbs[1] = txn->mt_dbs[1]; - mp->mm_last_pg = txn->mt_next_pgno - 1; - mp->mm_txnid = txn->mt_txnid; - if (!(env->me_flags & (MDB_NOMETASYNC|MDB_NOSYNC))) { - unsigned meta_size = env->me_psize; - rc = (env->me_flags & MDB_MAPASYNC) ? MS_ASYNC : MS_SYNC; - ptr = env->me_map; - if (toggle) { -#ifndef _WIN32 /* POSIX msync() requires ptr = start of OS page */ - if (meta_size < env->me_os_psize) - meta_size += meta_size; - else -#endif - ptr += meta_size; - } - if (MDB_MSYNC(ptr, meta_size, rc)) { - rc = ErrCode(); - goto fail; - } - } - goto done; - } - metab.mm_txnid = env->me_metas[toggle]->mm_txnid; - metab.mm_last_pg = env->me_metas[toggle]->mm_last_pg; - - meta.mm_mapsize = mapsize; - meta.mm_dbs[0] = txn->mt_dbs[0]; - meta.mm_dbs[1] = txn->mt_dbs[1]; - meta.mm_last_pg = txn->mt_next_pgno - 1; - meta.mm_txnid = txn->mt_txnid; - - off = offsetof(MDB_meta, mm_mapsize); - ptr = (char *)&meta + off; - len = sizeof(MDB_meta) - off; - if (toggle) - off += env->me_psize; - off += PAGEHDRSZ; - - /* Write to the SYNC fd */ - mfd = env->me_flags & (MDB_NOSYNC|MDB_NOMETASYNC) ? - env->me_fd : env->me_mfd; -#ifdef _WIN32 - { - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - if (!WriteFile(mfd, ptr, len, (DWORD *)&rc, &ov)) - rc = -1; - } -#else - rc = pwrite(mfd, ptr, len, off); -#endif - if (rc != len) { - rc = rc < 0 ? ErrCode() : EIO; - DPUTS("write failed, disk error?"); - /* On a failure, the pagecache still contains the new data. - * Write some old data back, to prevent it from being used. - * Use the non-SYNC fd; we know it will fail anyway. 
- */ - meta.mm_last_pg = metab.mm_last_pg; - meta.mm_txnid = metab.mm_txnid; -#ifdef _WIN32 - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - WriteFile(env->me_fd, ptr, len, NULL, &ov); -#else - r2 = pwrite(env->me_fd, ptr, len, off); - (void)r2; /* Silence warnings. We don't care about pwrite's return value */ -#endif -fail: - env->me_flags |= MDB_FATAL_ERROR; - return rc; - } - /* MIPS has cache coherency issues, this is a no-op everywhere else */ - CACHEFLUSH(env->me_map + off, len, DCACHE); -done: - /* Memory ordering issues are irrelevant; since the entire writer - * is wrapped by wmutex, all of these changes will become visible - * after the wmutex is unlocked. Since the DB is multi-version, - * readers will get consistent data regardless of how fresh or - * how stale their view of these values is. - */ - if (env->me_txns) - env->me_txns->mti_txnid = txn->mt_txnid; - - return MDB_SUCCESS; -} - -/** Check both meta pages to see which one is newer. - * @param[in] env the environment handle - * @return meta toggle (0 or 1). - */ -static int -mdb_env_pick_meta(const MDB_env *env) -{ - return (env->me_metas[0]->mm_txnid < env->me_metas[1]->mm_txnid); -} - -int ESECT -mdb_env_create(MDB_env **env) -{ - MDB_env *e; - - e = calloc(1, sizeof(MDB_env)); - if (!e) - return ENOMEM; - - e->me_maxreaders = DEFAULT_READERS; - e->me_maxdbs = e->me_numdbs = 2; - e->me_fd = INVALID_HANDLE_VALUE; - e->me_lfd = INVALID_HANDLE_VALUE; - e->me_mfd = INVALID_HANDLE_VALUE; -#ifdef MDB_USE_POSIX_SEM - e->me_rmutex = SEM_FAILED; - e->me_wmutex = SEM_FAILED; -#endif - e->me_pid = getpid(); - GET_PAGESIZE(e->me_os_psize); - VGMEMP_CREATE(e,0,0); - *env = e; - return MDB_SUCCESS; -} - -static int ESECT -mdb_env_map(MDB_env *env, void *addr) -{ - MDB_page *p; - unsigned int flags = env->me_flags; -#ifdef _WIN32 - int rc; - HANDLE mh; - LONG sizelo, sizehi; - size_t msize; - - if (flags & MDB_RDONLY) { - /* Don't set explicit map size, use whatever exists */ - msize = 0; - sizelo = 0; - sizehi = 0; - } else { - msize = env->me_mapsize; - sizelo = msize & 0xffffffff; - sizehi = msize >> 16 >> 16; /* only needed on Win64 */ - - /* Windows won't create mappings for zero length files. - * and won't map more than the file size. - * Just set the maxsize right now. - */ - if (SetFilePointer(env->me_fd, sizelo, &sizehi, 0) != (DWORD)sizelo - || !SetEndOfFile(env->me_fd) - || SetFilePointer(env->me_fd, 0, NULL, 0) != 0) - return ErrCode(); - } - - mh = CreateFileMapping(env->me_fd, NULL, flags & MDB_WRITEMAP ? - PAGE_READWRITE : PAGE_READONLY, - sizehi, sizelo, NULL); - if (!mh) - return ErrCode(); - env->me_map = MapViewOfFileEx(mh, flags & MDB_WRITEMAP ? - FILE_MAP_WRITE : FILE_MAP_READ, - 0, 0, msize, addr); - rc = env->me_map ? 0 : ErrCode(); - CloseHandle(mh); - if (rc) - return rc; -#else - int prot = PROT_READ; - if (flags & MDB_WRITEMAP) { - prot |= PROT_WRITE; - if (ftruncate(env->me_fd, env->me_mapsize) < 0) - return ErrCode(); - } - env->me_map = mmap(addr, env->me_mapsize, prot, MAP_SHARED, - env->me_fd, 0); - if (env->me_map == MAP_FAILED) { - env->me_map = NULL; - return ErrCode(); - } - - if (flags & MDB_NORDAHEAD) { - /* Turn off readahead. It's harmful when the DB is larger than RAM. 
*/ -#ifdef MADV_RANDOM - madvise(env->me_map, env->me_mapsize, MADV_RANDOM); -#else -#ifdef POSIX_MADV_RANDOM - posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM); -#endif /* POSIX_MADV_RANDOM */ -#endif /* MADV_RANDOM */ - } -#endif /* _WIN32 */ - - /* Can happen because the address argument to mmap() is just a - * hint. mmap() can pick another, e.g. if the range is in use. - * The MAP_FIXED flag would prevent that, but then mmap could - * instead unmap existing pages to make room for the new map. - */ - if (addr && env->me_map != addr) - return EBUSY; /* TODO: Make a new MDB_* error code? */ - - p = (MDB_page *)env->me_map; - env->me_metas[0] = METADATA(p); - env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize); - - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_mapsize(MDB_env *env, size_t size) -{ - /* If env is already open, caller is responsible for making - * sure there are no active txns. - */ - if (env->me_map) { - int rc; - void *old; - if (env->me_txn) - return EINVAL; - if (!size) - size = env->me_metas[mdb_env_pick_meta(env)]->mm_mapsize; - else if (size < env->me_mapsize) { - /* If the configured size is smaller, make sure it's - * still big enough. Silently round up to minimum if not. - */ - size_t minsize = (env->me_metas[mdb_env_pick_meta(env)]->mm_last_pg + 1) * env->me_psize; - if (size < minsize) - size = minsize; - } - munmap(env->me_map, env->me_mapsize); - env->me_mapsize = size; - old = (env->me_flags & MDB_FIXEDMAP) ? env->me_map : NULL; - rc = mdb_env_map(env, old); - if (rc) - return rc; - } - env->me_mapsize = size; - if (env->me_psize) - env->me_maxpg = env->me_mapsize / env->me_psize; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs) -{ - if (env->me_map) - return EINVAL; - env->me_maxdbs = dbs + 2; /* Named databases + main and free DB */ - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_maxreaders(MDB_env *env, unsigned int readers) -{ - if (env->me_map || readers < 1) - return EINVAL; - env->me_maxreaders = readers; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers) -{ - if (!env || !readers) - return EINVAL; - *readers = env->me_maxreaders; - return MDB_SUCCESS; -} - -/** Further setup required for opening an LMDB environment - */ -static int ESECT -mdb_env_open2(MDB_env *env) -{ - unsigned int flags = env->me_flags; - int i, newenv = 0, rc; - MDB_meta meta; - -#ifdef _WIN32 - /* See if we should use QueryLimited */ - rc = GetVersion(); - if ((rc & 0xff) > 5) - env->me_pidquery = MDB_PROCESS_QUERY_LIMITED_INFORMATION; - else - env->me_pidquery = PROCESS_QUERY_INFORMATION; -#endif /* _WIN32 */ - - memset(&meta, 0, sizeof(meta)); - - if ((i = mdb_env_read_header(env, &meta)) != 0) { - if (i != ENOENT) - return i; - DPUTS("new mdbenv"); - newenv = 1; - env->me_psize = env->me_os_psize; - if (env->me_psize > MAX_PAGESIZE) - env->me_psize = MAX_PAGESIZE; - } else { - env->me_psize = meta.mm_psize; - } - - /* Was a mapsize configured? */ - if (!env->me_mapsize) { - /* If this is a new environment, take the default, - * else use the size recorded in the existing env. - */ - env->me_mapsize = newenv ? DEFAULT_MAPSIZE : meta.mm_mapsize; - } else if (env->me_mapsize < meta.mm_mapsize) { - /* If the configured size is smaller, make sure it's - * still big enough. Silently round up to minimum if not. 
-		 */
-		size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize;
-		if (env->me_mapsize < minsize)
-			env->me_mapsize = minsize;
-	}
-
-	rc = mdb_env_map(env, (flags & MDB_FIXEDMAP) ? meta.mm_address : NULL);
-	if (rc)
-		return rc;
-
-	if (newenv) {
-		if (flags & MDB_FIXEDMAP)
-			meta.mm_address = env->me_map;
-		i = mdb_env_init_meta(env, &meta);
-		if (i != MDB_SUCCESS) {
-			return i;
-		}
-	}
-
-	env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1;
-	env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2)
-		- sizeof(indx_t);
-#if !(MDB_MAXKEYSIZE)
-	env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db));
-#endif
-	env->me_maxpg = env->me_mapsize / env->me_psize;
-
-#if MDB_DEBUG
-	{
-		int toggle = mdb_env_pick_meta(env);
-		MDB_db *db = &env->me_metas[toggle]->mm_dbs[MAIN_DBI];
-
-		DPRINTF(("opened database version %u, pagesize %u",
-			env->me_metas[0]->mm_version, env->me_psize));
-		DPRINTF(("using meta page %d", toggle));
-		DPRINTF(("depth: %u", db->md_depth));
-		DPRINTF(("entries: %"Z"u", db->md_entries));
-		DPRINTF(("branch pages: %"Z"u", db->md_branch_pages));
-		DPRINTF(("leaf pages: %"Z"u", db->md_leaf_pages));
-		DPRINTF(("overflow pages: %"Z"u", db->md_overflow_pages));
-		DPRINTF(("root: %"Z"u", db->md_root));
-	}
-#endif
-
-	return MDB_SUCCESS;
-}
-
-
-/** Release a reader thread's slot in the reader lock table.
- * This function is called automatically when a thread exits.
- * @param[in] ptr This points to the slot in the reader lock table.
- */
-static void
-mdb_env_reader_dest(void *ptr)
-{
-	MDB_reader *reader = ptr;
-
-	reader->mr_pid = 0;
-}
-
-#ifdef _WIN32
-/** Junk for arranging thread-specific callbacks on Windows. This is
- * necessarily platform and compiler-specific. Windows supports up
- * to 1088 keys. Let's assume nobody opens more than 64 environments
- * in a single process, for now. They can override this if needed.
- */
-#ifndef MAX_TLS_KEYS
-#define MAX_TLS_KEYS 64
-#endif
-static pthread_key_t mdb_tls_keys[MAX_TLS_KEYS];
-static int mdb_tls_nkeys;
-
-static void NTAPI mdb_tls_callback(PVOID module, DWORD reason, PVOID ptr)
-{
-	int i;
-	switch(reason) {
-	case DLL_PROCESS_ATTACH: break;
-	case DLL_THREAD_ATTACH: break;
-	case DLL_THREAD_DETACH:
-		for (i=0; i<mdb_tls_nkeys; i++) {
-			MDB_reader *r = pthread_getspecific(mdb_tls_keys[i]);
-			if (r) {
-				mdb_env_reader_dest(r);
-			}
-		}
-		break;
-	case DLL_PROCESS_DETACH: break;
-	}
-}
-#endif	/* _WIN32 */
-
-/** Downgrade the exclusive lock on the region back to shared */
-static int ESECT
-mdb_env_share_locks(MDB_env *env, int *excl)
-{
-	int rc = 0, toggle = mdb_env_pick_meta(env);
-
-	env->me_txns->mti_txnid = env->me_metas[toggle]->mm_txnid;
-
-#ifdef _WIN32
-	{
-		OVERLAPPED ov;
-		/* First acquire a shared lock. The Unlock will
-		 * then release the existing exclusive lock.
-		 */
-		memset(&ov, 0, sizeof(ov));
-		if (!LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) {
-			rc = ErrCode();
-		} else {
-			UnlockFile(env->me_lfd, 0, 0, 1, 0);
-			*excl = 0;
-		}
-	}
-#else
-	{
-		struct flock lock_info;
-		/* The shared lock replaces the existing lock */
-		memset((void *)&lock_info, 0, sizeof(lock_info));
-		lock_info.l_type = F_RDLCK;
-		lock_info.l_whence = SEEK_SET;
-		lock_info.l_start = 0;
-		lock_info.l_len = 1;
-		while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) &&
-				(rc = ErrCode()) == EINTR) ;
-		*excl = rc ? -1 : 0;	/* error may mean we lost the lock */
-	}
-#endif
-
-	return rc;
-}
-
-/** Try to get exclusive lock, otherwise shared.
- *	Maintain *excl = -1: no/unknown lock, 0: shared, 1: exclusive.
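- *
- *	A minimal standalone sketch of the POSIX branch below (illustration
- *	only; lfd stands in for the lockfile descriptor):
- *
- *		struct flock fl;
- *		memset(&fl, 0, sizeof(fl));
- *		fl.l_type = F_WRLCK;		// try exclusive first
- *		fl.l_whence = SEEK_SET;
- *		fl.l_start = 0;
- *		fl.l_len = 1;			// one byte of the lockfile
- *		if (fcntl(lfd, F_SETLK, &fl) == 0)
- *			excl = 1;
- *		else {
- *			fl.l_type = F_RDLCK;	// fall back to shared
- *			if (fcntl(lfd, F_SETLKW, &fl) == 0)
- *				excl = 0;
- *		}
- *
- *	The same byte-range mechanism, with l_start set to the pid, backs
- *	the per-process reader locks in mdb_reader_pid() above.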
- */ -static int ESECT -mdb_env_excl_lock(MDB_env *env, int *excl) -{ - int rc = 0; -#ifdef _WIN32 - if (LockFile(env->me_lfd, 0, 0, 1, 0)) { - *excl = 1; - } else { - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); - if (LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) { - *excl = 0; - } else { - rc = ErrCode(); - } - } -#else - struct flock lock_info; - memset((void *)&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_WRLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = 0; - lock_info.l_len = 1; - while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - if (!rc) { - *excl = 1; - } else -# ifdef MDB_USE_POSIX_SEM - if (*excl < 0) /* always true when !MDB_USE_POSIX_SEM */ -# endif - { - lock_info.l_type = F_RDLCK; - while ((rc = fcntl(env->me_lfd, F_SETLKW, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - if (rc == 0) - *excl = 0; - } -#endif - return rc; -} - -#ifdef MDB_USE_HASH -/* - * hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code - * - * @(#) $Revision: 5.1 $ - * @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $ - * @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $ - * - * http://www.isthe.com/chongo/tech/comp/fnv/index.html - * - *** - * - * Please do not copyright this code. This code is in the public domain. - * - * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO - * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF - * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - * - * By: - * chongo /\oo/\ - * http://www.isthe.com/chongo/ - * - * Share and Enjoy! :-) - */ - -typedef unsigned long long mdb_hash_t; -#define MDB_HASH_INIT ((mdb_hash_t)0xcbf29ce484222325ULL) - -/** perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer - * @param[in] val value to hash - * @param[in] hval initial value for hash - * @return 64 bit hash - * - * NOTE: To use the recommended 64 bit FNV-1a hash, use MDB_HASH_INIT as the - * hval arg on the first call. - */ -static mdb_hash_t -mdb_hash_val(MDB_val *val, mdb_hash_t hval) -{ - unsigned char *s = (unsigned char *)val->mv_data; /* unsigned string */ - unsigned char *end = s + val->mv_size; - /* - * FNV-1a hash each octet of the string - */ - while (s < end) { - /* xor the bottom with the current octet */ - hval ^= (mdb_hash_t)*s++; - - /* multiply by the 64 bit FNV magic prime mod 2^64 */ - hval += (hval << 1) + (hval << 4) + (hval << 5) + - (hval << 7) + (hval << 8) + (hval << 40); - } - /* return our new hash value */ - return hval; -} - -/** Hash the string and output the encoded hash. - * This uses modified RFC1924 Ascii85 encoding to accommodate systems with - * very short name limits. We don't care about the encoding being reversible, - * we just want to preserve as many bits of the input as possible in a - * small printable string. 
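- * As a worked example of the FNV-1a step above (a standard published test
- * vector, not computed by this file): hashing the single byte 'a' gives
- *
- *	(0xcbf29ce484222325 ^ 0x61) * 0x100000001b3  =  0xaf63dc4c8601ec8c  (mod 2^64)
- *
- * and the two 32-bit halves of that value are then packed into ten
- * printable characters by mdb_pack85() below.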
- * @param[in] str string to hash - * @param[out] encbuf an array of 11 chars to hold the hash - */ -static const char mdb_a85[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"; - -static void -mdb_pack85(unsigned long l, char *out) -{ - int i; - - for (i=0; i<5; i++) { - *out++ = mdb_a85[l % 85]; - l /= 85; - } -} - -static void -mdb_hash_enc(MDB_val *val, char *encbuf) -{ - mdb_hash_t h = mdb_hash_val(val, MDB_HASH_INIT); - - mdb_pack85(h, encbuf); - mdb_pack85(h>>32, encbuf+5); - encbuf[10] = '\0'; -} -#endif - -/** Open and/or initialize the lock region for the environment. - * @param[in] env The LMDB environment. - * @param[in] lpath The pathname of the file used for the lock region. - * @param[in] mode The Unix permissions for the file, if we create it. - * @param[out] excl Resulting file lock type: -1 none, 0 shared, 1 exclusive - * @param[in,out] excl In -1, out lock type: -1 none, 0 shared, 1 exclusive - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl) -{ -#ifdef _WIN32 -# define MDB_ERRCODE_ROFS ERROR_WRITE_PROTECT -#else -# define MDB_ERRCODE_ROFS EROFS -#ifdef O_CLOEXEC /* Linux: Open file and set FD_CLOEXEC atomically */ -# define MDB_CLOEXEC O_CLOEXEC -#else - int fdflags; -# define MDB_CLOEXEC 0 -#endif -#endif - int rc; - off_t size, rsize; - -#ifdef _WIN32 - env->me_lfd = CreateFile(lpath, GENERIC_READ|GENERIC_WRITE, - FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL, NULL); -#else - env->me_lfd = open(lpath, O_RDWR|O_CREAT|MDB_CLOEXEC, mode); -#endif - if (env->me_lfd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - if (rc == MDB_ERRCODE_ROFS && (env->me_flags & MDB_RDONLY)) { - return MDB_SUCCESS; - } - goto fail_errno; - } -#if ! ((MDB_CLOEXEC) || defined(_WIN32)) - /* Lose record locks when exec*() */ - if ((fdflags = fcntl(env->me_lfd, F_GETFD) | FD_CLOEXEC) >= 0) - fcntl(env->me_lfd, F_SETFD, fdflags); -#endif - - if (!(env->me_flags & MDB_NOTLS)) { - rc = pthread_key_create(&env->me_txkey, mdb_env_reader_dest); - if (rc) - goto fail; - env->me_flags |= MDB_ENV_TXKEY; -#ifdef _WIN32 - /* Windows TLS callbacks need help finding their TLS info. */ - if (mdb_tls_nkeys >= MAX_TLS_KEYS) { - rc = MDB_TLS_FULL; - goto fail; - } - mdb_tls_keys[mdb_tls_nkeys++] = env->me_txkey; -#endif - } - - /* Try to get exclusive lock. If we succeed, then - * nobody is using the lock region and we should initialize it. 
- */ - if ((rc = mdb_env_excl_lock(env, excl))) goto fail; - -#ifdef _WIN32 - size = GetFileSize(env->me_lfd, NULL); -#else - size = lseek(env->me_lfd, 0, SEEK_END); - if (size == -1) goto fail_errno; -#endif - rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo); - if (size < rsize && *excl > 0) { -#ifdef _WIN32 - if (SetFilePointer(env->me_lfd, rsize, NULL, FILE_BEGIN) != (DWORD)rsize - || !SetEndOfFile(env->me_lfd)) - goto fail_errno; -#else - if (ftruncate(env->me_lfd, rsize) != 0) goto fail_errno; -#endif - } else { - rsize = size; - size = rsize - sizeof(MDB_txninfo); - env->me_maxreaders = size/sizeof(MDB_reader) + 1; - } - { -#ifdef _WIN32 - HANDLE mh; - mh = CreateFileMapping(env->me_lfd, NULL, PAGE_READWRITE, - 0, 0, NULL); - if (!mh) goto fail_errno; - env->me_txns = MapViewOfFileEx(mh, FILE_MAP_WRITE, 0, 0, rsize, NULL); - CloseHandle(mh); - if (!env->me_txns) goto fail_errno; -#else - void *m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED, - env->me_lfd, 0); - if (m == MAP_FAILED) goto fail_errno; - env->me_txns = m; -#endif - } - if (*excl > 0) { -#ifdef _WIN32 - BY_HANDLE_FILE_INFORMATION stbuf; - struct { - DWORD volume; - DWORD nhigh; - DWORD nlow; - } idbuf; - MDB_val val; - char encbuf[11]; - - if (!mdb_sec_inited) { - InitializeSecurityDescriptor(&mdb_null_sd, - SECURITY_DESCRIPTOR_REVISION); - SetSecurityDescriptorDacl(&mdb_null_sd, TRUE, 0, FALSE); - mdb_all_sa.nLength = sizeof(SECURITY_ATTRIBUTES); - mdb_all_sa.bInheritHandle = FALSE; - mdb_all_sa.lpSecurityDescriptor = &mdb_null_sd; - mdb_sec_inited = 1; - } - if (!GetFileInformationByHandle(env->me_lfd, &stbuf)) goto fail_errno; - idbuf.volume = stbuf.dwVolumeSerialNumber; - idbuf.nhigh = stbuf.nFileIndexHigh; - idbuf.nlow = stbuf.nFileIndexLow; - val.mv_data = &idbuf; - val.mv_size = sizeof(idbuf); - mdb_hash_enc(&val, encbuf); - sprintf(env->me_txns->mti_rmname, "Global\\MDBr%s", encbuf); - sprintf(env->me_txns->mti_wmname, "Global\\MDBw%s", encbuf); - env->me_rmutex = CreateMutex(&mdb_all_sa, FALSE, env->me_txns->mti_rmname); - if (!env->me_rmutex) goto fail_errno; - env->me_wmutex = CreateMutex(&mdb_all_sa, FALSE, env->me_txns->mti_wmname); - if (!env->me_wmutex) goto fail_errno; -#elif defined(MDB_USE_POSIX_SEM) - struct stat stbuf; - struct { - dev_t dev; - ino_t ino; - } idbuf; - MDB_val val; - char encbuf[11]; - -#if defined(__NetBSD__) -#define MDB_SHORT_SEMNAMES 1 /* limited to 14 chars */ -#endif - if (fstat(env->me_lfd, &stbuf)) goto fail_errno; - idbuf.dev = stbuf.st_dev; - idbuf.ino = stbuf.st_ino; - val.mv_data = &idbuf; - val.mv_size = sizeof(idbuf); - mdb_hash_enc(&val, encbuf); -#ifdef MDB_SHORT_SEMNAMES - encbuf[9] = '\0'; /* drop name from 15 chars to 14 chars */ -#endif - sprintf(env->me_txns->mti_rmname, "/MDBr%s", encbuf); - sprintf(env->me_txns->mti_wmname, "/MDBw%s", encbuf); - /* Clean up after a previous run, if needed: Try to - * remove both semaphores before doing anything else. 
- */ - sem_unlink(env->me_txns->mti_rmname); - sem_unlink(env->me_txns->mti_wmname); - env->me_rmutex = sem_open(env->me_txns->mti_rmname, - O_CREAT|O_EXCL, mode, 1); - if (env->me_rmutex == SEM_FAILED) goto fail_errno; - env->me_wmutex = sem_open(env->me_txns->mti_wmname, - O_CREAT|O_EXCL, mode, 1); - if (env->me_wmutex == SEM_FAILED) goto fail_errno; -#else /* MDB_USE_POSIX_SEM */ - pthread_mutexattr_t mattr; - - if ((rc = pthread_mutexattr_init(&mattr)) - || (rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED)) - || (rc = pthread_mutex_init(&env->me_txns->mti_mutex, &mattr)) - || (rc = pthread_mutex_init(&env->me_txns->mti_wmutex, &mattr))) - goto fail; - pthread_mutexattr_destroy(&mattr); -#endif /* _WIN32 || MDB_USE_POSIX_SEM */ - - env->me_txns->mti_magic = MDB_MAGIC; - env->me_txns->mti_format = MDB_LOCK_FORMAT; - env->me_txns->mti_txnid = 0; - env->me_txns->mti_numreaders = 0; - - } else { - if (env->me_txns->mti_magic != MDB_MAGIC) { - DPUTS("lock region has invalid magic"); - rc = MDB_INVALID; - goto fail; - } - if (env->me_txns->mti_format != MDB_LOCK_FORMAT) { - DPRINTF(("lock region has format+version 0x%x, expected 0x%x", - env->me_txns->mti_format, MDB_LOCK_FORMAT)); - rc = MDB_VERSION_MISMATCH; - goto fail; - } - rc = ErrCode(); - if (rc && rc != EACCES && rc != EAGAIN) { - goto fail; - } -#ifdef _WIN32 - env->me_rmutex = OpenMutex(SYNCHRONIZE, FALSE, env->me_txns->mti_rmname); - if (!env->me_rmutex) goto fail_errno; - env->me_wmutex = OpenMutex(SYNCHRONIZE, FALSE, env->me_txns->mti_wmname); - if (!env->me_wmutex) goto fail_errno; -#elif defined(MDB_USE_POSIX_SEM) - env->me_rmutex = sem_open(env->me_txns->mti_rmname, 0); - if (env->me_rmutex == SEM_FAILED) goto fail_errno; - env->me_wmutex = sem_open(env->me_txns->mti_wmname, 0); - if (env->me_wmutex == SEM_FAILED) goto fail_errno; -#endif - } - return MDB_SUCCESS; - -fail_errno: - rc = ErrCode(); -fail: - return rc; -} - - /** The name of the lock file in the DB environment */ -#define LOCKNAME "/lock.mdb" - /** The name of the data file in the DB environment */ -#define DATANAME "/data.mdb" - /** The suffix of the lock file when no subdir is used */ -#define LOCKSUFF "-lock" - /** Only a subset of the @ref mdb_env flags can be changed - * at runtime. Changing other flags requires closing the - * environment and re-opening it with the new flags. 
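- * A typical open sequence, for illustration (values are arbitrary):
- *
- *	MDB_env *env;
- *	mdb_env_create(&env);
- *	mdb_env_set_mapsize(env, 256UL*1024*1024);
- *	mdb_env_open(env, "./testdb", MDB_NOSUBDIR|MDB_NOSYNC, 0664);
- *
- * Here MDB_NOSYNC may later be changed at runtime (CHANGEABLE below),
- * while MDB_NOSUBDIR is fixed for the life of the environment
- * (CHANGELESS).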
- */ -#define CHANGEABLE (MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC|MDB_NOMEMINIT) -#define CHANGELESS (MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY|MDB_WRITEMAP| \ - MDB_NOTLS|MDB_NOLOCK|MDB_NORDAHEAD) - -#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS) -# error "Persistent DB flags & env flags overlap, but both go in mm_flags" -#endif - -int ESECT -mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode) -{ - int oflags, rc, len, excl = -1; - char *lpath, *dpath; - - if (env->me_fd!=INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS))) - return EINVAL; - - len = strlen(path); - if (flags & MDB_NOSUBDIR) { - rc = len + sizeof(LOCKSUFF) + len + 1; - } else { - rc = len + sizeof(LOCKNAME) + len + sizeof(DATANAME); - } - lpath = malloc(rc); - if (!lpath) - return ENOMEM; - if (flags & MDB_NOSUBDIR) { - dpath = lpath + len + sizeof(LOCKSUFF); - sprintf(lpath, "%s" LOCKSUFF, path); - strcpy(dpath, path); - } else { - dpath = lpath + len + sizeof(LOCKNAME); - sprintf(lpath, "%s" LOCKNAME, path); - sprintf(dpath, "%s" DATANAME, path); - } - - rc = MDB_SUCCESS; - flags |= env->me_flags; - if (flags & MDB_RDONLY) { - /* silently ignore WRITEMAP when we're only getting read access */ - flags &= ~MDB_WRITEMAP; - } else { - if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)) && - (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2))))) - rc = ENOMEM; - } - env->me_flags = flags |= MDB_ENV_ACTIVE; - if (rc) - goto leave; - - env->me_path = strdup(path); - env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDB_dbx)); - env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t)); - env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned int)); - if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) { - rc = ENOMEM; - goto leave; - } - - /* For RDONLY, get lockfile after we know datafile exists */ - if (!(flags & (MDB_RDONLY|MDB_NOLOCK))) { - rc = mdb_env_setup_locks(env, lpath, mode, &excl); - if (rc) - goto leave; - } - -#ifdef _WIN32 - if (F_ISSET(flags, MDB_RDONLY)) { - oflags = GENERIC_READ; - len = OPEN_EXISTING; - } else { - oflags = GENERIC_READ|GENERIC_WRITE; - len = OPEN_ALWAYS; - } - mode = FILE_ATTRIBUTE_NORMAL; - env->me_fd = CreateFile(dpath, oflags, FILE_SHARE_READ|FILE_SHARE_WRITE, - NULL, len, mode, NULL); -#else - if (F_ISSET(flags, MDB_RDONLY)) - oflags = O_RDONLY; - else - oflags = O_RDWR | O_CREAT; - - env->me_fd = open(dpath, oflags, mode); -#endif - if (env->me_fd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - goto leave; - } - - if ((flags & (MDB_RDONLY|MDB_NOLOCK)) == MDB_RDONLY) { - rc = mdb_env_setup_locks(env, lpath, mode, &excl); - if (rc) - goto leave; - } - - if ((rc = mdb_env_open2(env)) == MDB_SUCCESS) { - if (flags & (MDB_RDONLY|MDB_WRITEMAP)) { - env->me_mfd = env->me_fd; - } else { - /* Synchronous fd for meta writes. Needed even with - * MDB_NOSYNC/MDB_NOMETASYNC, in case these get reset. 
-			 */
-#ifdef _WIN32
-			len = OPEN_EXISTING;
-			env->me_mfd = CreateFile(dpath, oflags,
-				FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, len,
-				mode | FILE_FLAG_WRITE_THROUGH, NULL);
-#else
-			oflags &= ~O_CREAT;
-			env->me_mfd = open(dpath, oflags | MDB_DSYNC, mode);
-#endif
-			if (env->me_mfd == INVALID_HANDLE_VALUE) {
-				rc = ErrCode();
-				goto leave;
-			}
-		}
-		DPRINTF(("opened dbenv %p", (void *) env));
-		if (excl > 0) {
-			rc = mdb_env_share_locks(env, &excl);
-			if (rc)
-				goto leave;
-		}
-		if (!((flags & MDB_RDONLY) ||
-			(env->me_pbuf = calloc(1, env->me_psize))))
-			rc = ENOMEM;
-	}
-
-leave:
-	if (rc) {
-		mdb_env_close0(env, excl);
-	}
-	free(lpath);
-	return rc;
-}
-
-/** Destroy resources from mdb_env_open(), clear our readers & DBIs */
-static void ESECT
-mdb_env_close0(MDB_env *env, int excl)
-{
-	int i;
-
-	if (!(env->me_flags & MDB_ENV_ACTIVE))
-		return;
-
-	/* Doing this here since me_dbxs may not exist during mdb_env_close */
-	for (i = env->me_maxdbs; --i > MAIN_DBI; )
-		free(env->me_dbxs[i].md_name.mv_data);
-
-	free(env->me_pbuf);
-	free(env->me_dbiseqs);
-	free(env->me_dbflags);
-	free(env->me_dbxs);
-	free(env->me_path);
-	free(env->me_dirty_list);
-	mdb_midl_free(env->me_free_pgs);
-
-	if (env->me_flags & MDB_ENV_TXKEY) {
-		pthread_key_delete(env->me_txkey);
-#ifdef _WIN32
-		/* Delete our key from the global list */
-		for (i=0; i<mdb_tls_nkeys; i++)
-			if (mdb_tls_keys[i] == env->me_txkey) {
-				mdb_tls_keys[i] = mdb_tls_keys[mdb_tls_nkeys-1];
-				mdb_tls_nkeys--;
-				break;
-			}
-#endif
-	}
-
-	if (env->me_map) {
-		munmap(env->me_map, env->me_mapsize);
-	}
-	if (env->me_mfd != env->me_fd && env->me_mfd != INVALID_HANDLE_VALUE)
-		(void) close(env->me_mfd);
-	if (env->me_fd != INVALID_HANDLE_VALUE)
-		(void) close(env->me_fd);
-	if (env->me_txns) {
-		MDB_PID_T pid = env->me_pid;
-		/* Clearing readers is done in this function because
-		 * me_txkey with its destructor must be disabled first.
-		 */
-		for (i = env->me_numreaders; --i >= 0; )
-			if (env->me_txns->mti_readers[i].mr_pid == pid)
-				env->me_txns->mti_readers[i].mr_pid = 0;
-#ifdef _WIN32
-		if (env->me_rmutex) {
-			CloseHandle(env->me_rmutex);
-			if (env->me_wmutex) CloseHandle(env->me_wmutex);
-		}
-		/* Windows automatically destroys the mutexes when
-		 * the last handle closes.
-		 */
-#elif defined(MDB_USE_POSIX_SEM)
-		if (env->me_rmutex != SEM_FAILED) {
-			sem_close(env->me_rmutex);
-			if (env->me_wmutex != SEM_FAILED)
-				sem_close(env->me_wmutex);
-			/* If we have the filelock: If we are the
-			 * only remaining user, clean up semaphores.
-			 */
-			if (excl == 0)
-				mdb_env_excl_lock(env, &excl);
-			if (excl > 0) {
-				sem_unlink(env->me_txns->mti_rmname);
-				sem_unlink(env->me_txns->mti_wmname);
-			}
-		}
-#endif
-		munmap((void *)env->me_txns, (env->me_maxreaders-1)*sizeof(MDB_reader)+sizeof(MDB_txninfo));
-	}
-	if (env->me_lfd != INVALID_HANDLE_VALUE) {
-#ifdef _WIN32
-		if (excl >= 0) {
-			/* Unlock the lockfile. Windows would have unlocked it
-			 * after closing anyway, but not necessarily at once.
- */ - UnlockFile(env->me_lfd, 0, 0, 1, 0); - } -#endif - (void) close(env->me_lfd); - } - - env->me_flags &= ~(MDB_ENV_ACTIVE|MDB_ENV_TXKEY); -} - - -void ESECT -mdb_env_close(MDB_env *env) -{ - MDB_page *dp; - - if (env == NULL) - return; - - VGMEMP_DESTROY(env); - while ((dp = env->me_dpages) != NULL) { - VGMEMP_DEFINED(&dp->mp_next, sizeof(dp->mp_next)); - env->me_dpages = dp->mp_next; - free(dp); - } - - mdb_env_close0(env, 0); - free(env); -} - -/** Compare two items pointing at aligned size_t's */ -static int -mdb_cmp_long(const MDB_val *a, const MDB_val *b) -{ - return (*(size_t *)a->mv_data < *(size_t *)b->mv_data) ? -1 : - *(size_t *)a->mv_data > *(size_t *)b->mv_data; -} - -/** Compare two items pointing at aligned unsigned int's */ -static int -mdb_cmp_int(const MDB_val *a, const MDB_val *b) -{ - return (*(unsigned int *)a->mv_data < *(unsigned int *)b->mv_data) ? -1 : - *(unsigned int *)a->mv_data > *(unsigned int *)b->mv_data; -} - -/** Compare two items pointing at unsigned ints of unknown alignment. - * Nodes and keys are guaranteed to be 2-byte aligned. - */ -static int -mdb_cmp_cint(const MDB_val *a, const MDB_val *b) -{ -#if BYTE_ORDER == LITTLE_ENDIAN - unsigned short *u, *c; - int x; - - u = (unsigned short *) ((char *) a->mv_data + a->mv_size); - c = (unsigned short *) ((char *) b->mv_data + a->mv_size); - do { - x = *--u - *--c; - } while(!x && u > (unsigned short *)a->mv_data); - return x; -#else - unsigned short *u, *c, *end; - int x; - - end = (unsigned short *) ((char *) a->mv_data + a->mv_size); - u = (unsigned short *)a->mv_data; - c = (unsigned short *)b->mv_data; - do { - x = *u++ - *c++; - } while(!x && u < end); - return x; -#endif -} - -/** Compare two items pointing at size_t's of unknown alignment. */ -#ifdef MISALIGNED_OK -# define mdb_cmp_clong mdb_cmp_long -#else -# define mdb_cmp_clong mdb_cmp_cint -#endif - -/** Compare two items lexically */ -static int -mdb_cmp_memn(const MDB_val *a, const MDB_val *b) -{ - int diff; - ssize_t len_diff; - unsigned int len; - - len = a->mv_size; - len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; - if (len_diff > 0) { - len = b->mv_size; - len_diff = 1; - } - - diff = memcmp(a->mv_data, b->mv_data, len); - return diff ? diff : len_diff<0 ? -1 : len_diff; -} - -/** Compare two items in reverse byte order */ -static int -mdb_cmp_memnr(const MDB_val *a, const MDB_val *b) -{ - const unsigned char *p1, *p2, *p1_lim; - ssize_t len_diff; - int diff; - - p1_lim = (const unsigned char *)a->mv_data; - p1 = (const unsigned char *)a->mv_data + a->mv_size; - p2 = (const unsigned char *)b->mv_data + b->mv_size; - - len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; - if (len_diff > 0) { - p1_lim += len_diff; - len_diff = 1; - } - - while (p1 > p1_lim) { - diff = *--p1 - *--p2; - if (diff) - return diff; - } - return len_diff<0 ? -1 : len_diff; -} - -/** Search for key within a page, using binary search. - * Returns the smallest entry larger or equal to the key. - * If exactp is non-null, stores whether the found entry was an exact match - * in *exactp (1 or 0). - * Updates the cursor index with the index of the found entry. - * If no entry larger or equal to the key is found, returns NULL. 
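(Aside, not from the patch: the lexical comparators deleted above, mdb_cmp_memn and mdb_cmp_memnr, share one rule: compare the common prefix, then break ties by length. A standalone sketch of that rule:)

#include <string.h>

/* Same contract as mdb_cmp_memn: memcmp over the shorter length,
 * and when the prefixes are equal the shorter item sorts first. */
static int cmp_memn(const void *a, size_t an, const void *b, size_t bn)
{
	size_t n = an < bn ? an : bn;
	int diff = memcmp(a, b, n);
	if (diff)
		return diff;
	return (an > bn) - (an < bn);	/* e.g. "ab" sorts before "abc" */
}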
- */ -static MDB_node * -mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp) -{ - unsigned int i = 0, nkeys; - int low, high; - int rc = 0; - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_node *node = NULL; - MDB_val nodekey; - MDB_cmp_func *cmp; - DKBUF; - - nkeys = NUMKEYS(mp); - - DPRINTF(("searching %u keys in %s %spage %"Z"u", - nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", - mdb_dbg_pgno(mp))); - - low = IS_LEAF(mp) ? 0 : 1; - high = nkeys - 1; - cmp = mc->mc_dbx->md_cmp; - - /* Branch pages have no data, so if using integer keys, - * alignment is guaranteed. Use faster mdb_cmp_int. - */ - if (cmp == mdb_cmp_cint && IS_BRANCH(mp)) { - if (NODEPTR(mp, 1)->mn_ksize == sizeof(size_t)) - cmp = mdb_cmp_long; - else - cmp = mdb_cmp_int; - } - - if (IS_LEAF2(mp)) { - nodekey.mv_size = mc->mc_db->md_pad; - node = NODEPTR(mp, 0); /* fake */ - while (low <= high) { - i = (low + high) >> 1; - nodekey.mv_data = LEAF2KEY(mp, i, nodekey.mv_size); - rc = cmp(key, &nodekey); - DPRINTF(("found leaf index %u [%s], rc = %i", - i, DKEY(&nodekey), rc)); - if (rc == 0) - break; - if (rc > 0) - low = i + 1; - else - high = i - 1; - } - } else { - while (low <= high) { - i = (low + high) >> 1; - - node = NODEPTR(mp, i); - nodekey.mv_size = NODEKSZ(node); - nodekey.mv_data = NODEKEY(node); - - rc = cmp(key, &nodekey); -#if MDB_DEBUG - if (IS_LEAF(mp)) - DPRINTF(("found leaf index %u [%s], rc = %i", - i, DKEY(&nodekey), rc)); - else - DPRINTF(("found branch index %u [%s -> %"Z"u], rc = %i", - i, DKEY(&nodekey), NODEPGNO(node), rc)); -#endif - if (rc == 0) - break; - if (rc > 0) - low = i + 1; - else - high = i - 1; - } - } - - if (rc > 0) { /* Found entry is less than the key. */ - i++; /* Skip to get the smallest entry larger than key. */ - if (!IS_LEAF2(mp)) - node = NODEPTR(mp, i); - } - if (exactp) - *exactp = (rc == 0 && nkeys > 0); - /* store the key index */ - mc->mc_ki[mc->mc_top] = i; - if (i >= nkeys) - /* There is no entry larger or equal to the key. */ - return NULL; - - /* nodeptr is fake for LEAF2 */ - return node; -} - -#if 0 -static void -mdb_cursor_adjust(MDB_cursor *mc, func) -{ - MDB_cursor *m2; - - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) { - func(mc, m2); - } - } -} -#endif - -/** Pop a page off the top of the cursor's stack. */ -static void -mdb_cursor_pop(MDB_cursor *mc) -{ - if (mc->mc_snum) { -#if MDB_DEBUG - MDB_page *top = mc->mc_pg[mc->mc_top]; -#endif - mc->mc_snum--; - if (mc->mc_snum) - mc->mc_top--; - - DPRINTF(("popped page %"Z"u off db %d cursor %p", top->mp_pgno, - DDBI(mc), (void *) mc)); - } -} - -/** Push a page onto the top of the cursor's stack. */ -static int -mdb_cursor_push(MDB_cursor *mc, MDB_page *mp) -{ - DPRINTF(("pushing page %"Z"u on db %d cursor %p", mp->mp_pgno, - DDBI(mc), (void *) mc)); - - if (mc->mc_snum >= CURSOR_STACK) { - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CURSOR_FULL; - } - - mc->mc_top = mc->mc_snum++; - mc->mc_pg[mc->mc_top] = mp; - mc->mc_ki[mc->mc_top] = 0; - - return MDB_SUCCESS; -} - -/** Find the address of the page corresponding to a given page number. - * @param[in] txn the transaction for this access. - * @param[in] pgno the page number for the page to retrieve. - * @param[out] ret address of a pointer where the page's address will be stored. - * @param[out] lvl dirty_list inheritance level of found page. 1=current txn, 0=mapped page. - * @return 0 on success, non-zero on failure. 
- */ -static int -mdb_page_get(MDB_txn *txn, pgno_t pgno, MDB_page **ret, int *lvl) -{ - MDB_env *env = txn->mt_env; - MDB_page *p = NULL; - int level; - - if (!((txn->mt_flags & MDB_TXN_RDONLY) | (env->me_flags & MDB_WRITEMAP))) { - MDB_txn *tx2 = txn; - level = 1; - do { - MDB_ID2L dl = tx2->mt_u.dirty_list; - unsigned x; - /* Spilled pages were dirtied in this txn and flushed - * because the dirty list got full. Bring this page - * back in from the map (but don't unspill it here, - * leave that unless page_touch happens again). - */ - if (tx2->mt_spill_pgs) { - MDB_ID pn = pgno << 1; - x = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { - p = (MDB_page *)(env->me_map + env->me_psize * pgno); - goto done; - } - } - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - p = dl[x].mptr; - goto done; - } - } - level++; - } while ((tx2 = tx2->mt_parent) != NULL); - } - - if (pgno < txn->mt_next_pgno) { - level = 0; - p = (MDB_page *)(env->me_map + env->me_psize * pgno); - } else { - DPRINTF(("page %"Z"u not found", pgno)); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PAGE_NOTFOUND; - } - -done: - *ret = p; - if (lvl) - *lvl = level; - return MDB_SUCCESS; -} - -/** Finish #mdb_page_search() / #mdb_page_search_lowest(). - * The cursor is at the root page, set up the rest of it. - */ -static int -mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int flags) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - int rc; - DKBUF; - - while (IS_BRANCH(mp)) { - MDB_node *node; - indx_t i; - - DPRINTF(("branch page %"Z"u has %u keys", mp->mp_pgno, NUMKEYS(mp))); - mdb_cassert(mc, NUMKEYS(mp) > 1); - DPRINTF(("found index 0 to page %"Z"u", NODEPGNO(NODEPTR(mp, 0)))); - - if (flags & (MDB_PS_FIRST|MDB_PS_LAST)) { - i = 0; - if (flags & MDB_PS_LAST) - i = NUMKEYS(mp) - 1; - } else { - int exact; - node = mdb_node_search(mc, key, &exact); - if (node == NULL) - i = NUMKEYS(mp) - 1; - else { - i = mc->mc_ki[mc->mc_top]; - if (!exact) { - mdb_cassert(mc, i > 0); - i--; - } - } - DPRINTF(("following index %u for key [%s]", i, DKEY(key))); - } - - mdb_cassert(mc, i < NUMKEYS(mp)); - node = NODEPTR(mp, i); - - if ((rc = mdb_page_get(mc->mc_txn, NODEPGNO(node), &mp, NULL)) != 0) - return rc; - - mc->mc_ki[mc->mc_top] = i; - if ((rc = mdb_cursor_push(mc, mp))) - return rc; - - if (flags & MDB_PS_MODIFY) { - if ((rc = mdb_page_touch(mc)) != 0) - return rc; - mp = mc->mc_pg[mc->mc_top]; - } - } - - if (!IS_LEAF(mp)) { - DPRINTF(("internal error, index points to a %02X page!?", - mp->mp_flags)); - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - - DPRINTF(("found leaf page %"Z"u for key [%s]", mp->mp_pgno, - key ? DKEY(key) : "null")); - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - return MDB_SUCCESS; -} - -/** Search for the lowest key under the current branch page. - * This just bypasses a NUMKEYS check in the current page - * before calling mdb_page_search_root(), because the callers - * are all in situations where the current page is known to - * be underfilled. 
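(Aside: mdb_page_get(), deleted just below, resolves a page number by preferring this transaction's dirty and spilled copies over the read-only map. A toy model of that lookup order, using plain arrays rather than LMDB's real MDB_ID2L dirty lists and parent-txn chain:)

#include <stddef.h>
#include <stdint.h>

typedef struct { uint64_t pgno; void *ptr; } dirty_entry;

/* Dirty copy from this txn wins; otherwise read the clean page
 * straight out of the memory map at pgno * pagesize. */
void *page_lookup(dirty_entry *dl, size_t n,	/* this txn's dirty pages */
	char *map, size_t psize,	/* the read-only mmap */
	uint64_t pgno)
{
	for (size_t i = 0; i < n; i++)
		if (dl[i].pgno == pgno)
			return dl[i].ptr;	/* version written in this txn */
	return map + (size_t)pgno * psize;
}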
- */ -static int -mdb_page_search_lowest(MDB_cursor *mc) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_node *node = NODEPTR(mp, 0); - int rc; - - if ((rc = mdb_page_get(mc->mc_txn, NODEPGNO(node), &mp, NULL)) != 0) - return rc; - - mc->mc_ki[mc->mc_top] = 0; - if ((rc = mdb_cursor_push(mc, mp))) - return rc; - return mdb_page_search_root(mc, NULL, MDB_PS_FIRST); -} - -/** Search for the page a given key should be in. - * Push it and its parent pages on the cursor stack. - * @param[in,out] mc the cursor for this operation. - * @param[in] key the key to search for, or NULL for first/last page. - * @param[in] flags If MDB_PS_MODIFY is set, visited pages in the DB - * are touched (updated with new page numbers). - * If MDB_PS_FIRST or MDB_PS_LAST is set, find first or last leaf. - * This is used by #mdb_cursor_first() and #mdb_cursor_last(). - * If MDB_PS_ROOTONLY set, just fetch root node, no further lookups. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_search(MDB_cursor *mc, MDB_val *key, int flags) -{ - int rc; - pgno_t root; - - /* Make sure the txn is still viable, then find the root from - * the txn's db table and set it as the root of the cursor's stack. - */ - if (F_ISSET(mc->mc_txn->mt_flags, MDB_TXN_ERROR)) { - DPUTS("transaction has failed, must abort"); - return MDB_BAD_TXN; - } else { - /* Make sure we're using an up-to-date root */ - if (*mc->mc_dbflag & DB_STALE) { - MDB_cursor mc2; - if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) - return MDB_BAD_DBI; - mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL); - rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, 0); - if (rc) - return rc; - { - MDB_val data; - int exact = 0; - uint16_t flags; - MDB_node *leaf = mdb_node_search(&mc2, - &mc->mc_dbx->md_name, &exact); - if (!exact) - return MDB_NOTFOUND; - rc = mdb_node_read(mc->mc_txn, leaf, &data); - if (rc) - return rc; - memcpy(&flags, ((char *) data.mv_data + offsetof(MDB_db, md_flags)), - sizeof(uint16_t)); - /* The txn may not know this DBI, or another process may - * have dropped and recreated the DB with other flags. - */ - if ((mc->mc_db->md_flags & PERSISTENT_FLAGS) != flags) - return MDB_INCOMPATIBLE; - memcpy(mc->mc_db, data.mv_data, sizeof(MDB_db)); - } - *mc->mc_dbflag &= ~DB_STALE; - } - root = mc->mc_db->md_root; - - if (root == P_INVALID) { /* Tree is empty. */ - DPUTS("tree is empty"); - return MDB_NOTFOUND; - } - } - - mdb_cassert(mc, root > 1); - if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root) - if ((rc = mdb_page_get(mc->mc_txn, root, &mc->mc_pg[0], NULL)) != 0) - return rc; - - mc->mc_snum = 1; - mc->mc_top = 0; - - DPRINTF(("db %d root page %"Z"u has flags 0x%X", - DDBI(mc), root, mc->mc_pg[0]->mp_flags)); - - if (flags & MDB_PS_MODIFY) { - if ((rc = mdb_page_touch(mc))) - return rc; - } - - if (flags & MDB_PS_ROOTONLY) - return MDB_SUCCESS; - - return mdb_page_search_root(mc, key, flags); -} - -static int -mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp) -{ - MDB_txn *txn = mc->mc_txn; - pgno_t pg = mp->mp_pgno; - unsigned x = 0, ovpages = mp->mp_pages; - MDB_env *env = txn->mt_env; - MDB_IDL sl = txn->mt_spill_pgs; - MDB_ID pn = pg << 1; - int rc; - - DPRINTF(("free ov page %"Z"u (%d)", pg, ovpages)); - /* If the page is dirty or on the spill list we just acquired it, - * so we should give it back to our current free list, if any. - * Otherwise put it onto the list of pages we freed in this txn. - * - * Won't create me_pghead: me_pglast must be inited along with it. 
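(Aside: mdb_page_search() is the descent that every read rides on, and mdb_get(), deleted a little further below, is a thin wrapper over it via mdb_cursor_set(MDB_SET). A caller-side sketch against the public API, not taken from this patch:)

#include <lmdb.h>
#include <string.h>

int get_example(MDB_env *env, MDB_dbi dbi, const char *k, MDB_val *out)
{
	MDB_txn *txn;
	MDB_val key;
	int rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
	if (rc) return rc;
	key.mv_size = strlen(k);
	key.mv_data = (void *)k;
	rc = mdb_get(txn, dbi, &key, out);	/* out->mv_data points into the map */
	mdb_txn_abort(txn);	/* the data pointer is invalid after this */
	return rc;
}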
- * Unsupported in nested txns: They would need to hide the page - * range in ancestor txns' dirty and spilled lists. - */ - if (env->me_pghead && - !txn->mt_parent && - ((mp->mp_flags & P_DIRTY) || - (sl && (x = mdb_midl_search(sl, pn)) <= sl[0] && sl[x] == pn))) - { - unsigned i, j; - pgno_t *mop; - MDB_ID2 *dl, ix, iy; - rc = mdb_midl_need(&env->me_pghead, ovpages); - if (rc) - return rc; - if (!(mp->mp_flags & P_DIRTY)) { - /* This page is no longer spilled */ - if (x == sl[0]) - sl[0]--; - else - sl[x] |= 1; - goto release; - } - /* Remove from dirty list */ - dl = txn->mt_u.dirty_list; - x = dl[0].mid--; - for (ix = dl[x]; ix.mptr != mp; ix = iy) { - if (x > 1) { - x--; - iy = dl[x]; - dl[x] = ix; - } else { - mdb_cassert(mc, x > 1); - j = ++(dl[0].mid); - dl[j] = ix; /* Unsorted. OK when MDB_TXN_ERROR. */ - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - } - if (!(env->me_flags & MDB_WRITEMAP)) - mdb_dpage_free(env, mp); -release: - /* Insert in me_pghead */ - mop = env->me_pghead; - j = mop[0] + ovpages; - for (i = mop[0]; i && mop[i] < pg; i--) - mop[j--] = mop[i]; - while (j>i) - mop[j--] = pg++; - mop[0] += ovpages; - } else { - rc = mdb_midl_append_range(&txn->mt_free_pgs, pg, ovpages); - if (rc) - return rc; - } - mc->mc_db->md_overflow_pages -= ovpages; - return 0; -} - -/** Return the data associated with a given node. - * @param[in] txn The transaction for this operation. - * @param[in] leaf The node being read. - * @param[out] data Updated to point to the node's data. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_node_read(MDB_txn *txn, MDB_node *leaf, MDB_val *data) -{ - MDB_page *omp; /* overflow page */ - pgno_t pgno; - int rc; - - if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) { - data->mv_size = NODEDSZ(leaf); - data->mv_data = NODEDATA(leaf); - return MDB_SUCCESS; - } - - /* Read overflow data. - */ - data->mv_size = NODEDSZ(leaf); - memcpy(&pgno, NODEDATA(leaf), sizeof(pgno)); - if ((rc = mdb_page_get(txn, pgno, &omp, NULL)) != 0) { - DPRINTF(("read overflow page %"Z"u failed", pgno)); - return rc; - } - data->mv_data = METADATA(omp); - - return MDB_SUCCESS; -} - -int -mdb_get(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data) -{ - MDB_cursor mc; - MDB_xcursor mx; - int exact = 0; - DKBUF; - - DPRINTF(("===> get db %u key [%s]", dbi, DKEY(key))); - - if (!key || !data || dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - mdb_cursor_init(&mc, txn, dbi, &mx); - return mdb_cursor_set(&mc, key, data, MDB_SET, &exact); -} - -/** Find a sibling for a page. - * Replaces the page at the top of the cursor's stack with the - * specified sibling, if one exists. - * @param[in] mc The cursor for this operation. - * @param[in] move_right Non-zero if the right sibling is requested, - * otherwise the left sibling. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_cursor_sibling(MDB_cursor *mc, int move_right) -{ - int rc; - MDB_node *indx; - MDB_page *mp; - - if (mc->mc_snum < 2) { - return MDB_NOTFOUND; /* root has no siblings */ - } - - mdb_cursor_pop(mc); - DPRINTF(("parent page is page %"Z"u, index %u", - mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top])); - - if (move_right ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top])) - : (mc->mc_ki[mc->mc_top] == 0)) { - DPRINTF(("no more keys left, moving to %s sibling", - move_right ? 
"right" : "left")); - if ((rc = mdb_cursor_sibling(mc, move_right)) != MDB_SUCCESS) { - /* undo cursor_pop before returning */ - mc->mc_top++; - mc->mc_snum++; - return rc; - } - } else { - if (move_right) - mc->mc_ki[mc->mc_top]++; - else - mc->mc_ki[mc->mc_top]--; - DPRINTF(("just moving to %s index key %u", - move_right ? "right" : "left", mc->mc_ki[mc->mc_top])); - } - mdb_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top])); - - indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if ((rc = mdb_page_get(mc->mc_txn, NODEPGNO(indx), &mp, NULL)) != 0) { - /* mc will be inconsistent if caller does mc_snum++ as above */ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - return rc; - } - - mdb_cursor_push(mc, mp); - if (!move_right) - mc->mc_ki[mc->mc_top] = NUMKEYS(mp)-1; - - return MDB_SUCCESS; -} - -/** Move the cursor to the next data item. */ -static int -mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) -{ - MDB_page *mp; - MDB_node *leaf; - int rc; - - if (mc->mc_flags & C_EOF) { - return MDB_NOTFOUND; - } - - mdb_cassert(mc, mc->mc_flags & C_INITIALIZED); - - mp = mc->mc_pg[mc->mc_top]; - - if (mc->mc_db->md_flags & MDB_DUPSORT) { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_NEXT || op == MDB_NEXT_DUP) { - rc = mdb_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_NEXT); - if (op != MDB_NEXT || rc != MDB_NOTFOUND) { - if (rc == MDB_SUCCESS) - MDB_GET_KEY(leaf, key); - return rc; - } - } - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if (op == MDB_NEXT_DUP) - return MDB_NOTFOUND; - } - } - - DPRINTF(("cursor_next: top page is %"Z"u in cursor %p", - mdb_dbg_pgno(mp), (void *) mc)); - if (mc->mc_flags & C_DEL) - goto skip; - - if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) { - DPUTS("=====> move to next sibling page"); - if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { - mc->mc_flags |= C_EOF; - return rc; - } - mp = mc->mc_pg[mc->mc_top]; - DPRINTF(("next page is %"Z"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); - } else - mc->mc_ki[mc->mc_top]++; - -skip: - DPRINTF(("==> cursor points to page %"Z"u with %u keys, key index %u", - mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); - - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) - return rc; - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Move the cursor to the previous data item. 
*/ -static int -mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) -{ - MDB_page *mp; - MDB_node *leaf; - int rc; - - mdb_cassert(mc, mc->mc_flags & C_INITIALIZED); - - mp = mc->mc_pg[mc->mc_top]; - - if (mc->mc_db->md_flags & MDB_DUPSORT) { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_PREV || op == MDB_PREV_DUP) { - rc = mdb_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_PREV); - if (op != MDB_PREV || rc != MDB_NOTFOUND) { - if (rc == MDB_SUCCESS) { - MDB_GET_KEY(leaf, key); - mc->mc_flags &= ~C_EOF; - } - return rc; - } - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if (op == MDB_PREV_DUP) - return MDB_NOTFOUND; - } - } - } - - DPRINTF(("cursor_prev: top page is %"Z"u in cursor %p", - mdb_dbg_pgno(mp), (void *) mc)); - - if (mc->mc_ki[mc->mc_top] == 0) { - DPUTS("=====> move to prev sibling page"); - if ((rc = mdb_cursor_sibling(mc, 0)) != MDB_SUCCESS) { - return rc; - } - mp = mc->mc_pg[mc->mc_top]; - mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1; - DPRINTF(("prev page is %"Z"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); - } else - mc->mc_ki[mc->mc_top]--; - - mc->mc_flags &= ~C_EOF; - - DPRINTF(("==> cursor points to page %"Z"u with %u keys, key index %u", - mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); - - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) - return rc; - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Set the cursor on a specific data item. */ -static int -mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, - MDB_cursor_op op, int *exactp) -{ - int rc; - MDB_page *mp; - MDB_node *leaf = NULL; - DKBUF; - - if (key->mv_size == 0) - return MDB_BAD_VALSIZE; - - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - - /* See if we're already on the right page */ - if (mc->mc_flags & C_INITIALIZED) { - MDB_val nodekey; - - mp = mc->mc_pg[mc->mc_top]; - if (!NUMKEYS(mp)) { - mc->mc_ki[mc->mc_top] = 0; - return MDB_NOTFOUND; - } - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_size = mc->mc_db->md_pad; - nodekey.mv_data = LEAF2KEY(mp, 0, nodekey.mv_size); - } else { - leaf = NODEPTR(mp, 0); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* Probably happens rarely, but first node on the page - * was the one we wanted. 
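(Aside: mdb_cursor_set(), whose internals follow, is what MDB_SET_RANGE and friends compile down to. A typical range-scan caller, sketched with the public API and not taken from this patch:)

#include <lmdb.h>

/* Position at the first key >= *start, then walk forward. */
int scan_from(MDB_txn *txn, MDB_dbi dbi, MDB_val *start)
{
	MDB_cursor *mc;
	MDB_val key = *start, data;
	int rc = mdb_cursor_open(txn, dbi, &mc);
	if (rc) return rc;
	for (rc = mdb_cursor_get(mc, &key, &data, MDB_SET_RANGE);
	     rc == MDB_SUCCESS;
	     rc = mdb_cursor_get(mc, &key, &data, MDB_NEXT)) {
		/* consume key/data here */
	}
	mdb_cursor_close(mc);
	return rc == MDB_NOTFOUND ? MDB_SUCCESS : rc;
}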
- */ - mc->mc_ki[mc->mc_top] = 0; - if (exactp) - *exactp = 1; - goto set1; - } - if (rc > 0) { - unsigned int i; - unsigned int nkeys = NUMKEYS(mp); - if (nkeys > 1) { - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_data = LEAF2KEY(mp, - nkeys-1, nodekey.mv_size); - } else { - leaf = NODEPTR(mp, nkeys-1); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* last node was the one we wanted */ - mc->mc_ki[mc->mc_top] = nkeys-1; - if (exactp) - *exactp = 1; - goto set1; - } - if (rc < 0) { - if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) { - /* This is definitely the right page, skip search_page */ - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_data = LEAF2KEY(mp, - mc->mc_ki[mc->mc_top], nodekey.mv_size); - } else { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* current node was the one we wanted */ - if (exactp) - *exactp = 1; - goto set1; - } - } - rc = 0; - goto set2; - } - } - /* If any parents have right-sibs, search. - * Otherwise, there's nothing further. - */ - for (i=0; i<mc->mc_top; i++) - if (mc->mc_ki[i] < - NUMKEYS(mc->mc_pg[i])-1) - break; - if (i == mc->mc_top) { - /* There are no other pages */ - mc->mc_ki[mc->mc_top] = nkeys; - return MDB_NOTFOUND; - } - } - if (!mc->mc_top) { - /* There are no other pages */ - mc->mc_ki[mc->mc_top] = 0; - if (op == MDB_SET_RANGE && !exactp) { - rc = 0; - goto set1; - } else - return MDB_NOTFOUND; - } - } - - rc = mdb_page_search(mc, key, 0); - if (rc != MDB_SUCCESS) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - mdb_cassert(mc, IS_LEAF(mp)); - -set2: - leaf = mdb_node_search(mc, key, exactp); - if (exactp != NULL && !*exactp) { - /* MDB_SET specified and not an exact match. 
*/ - return MDB_NOTFOUND; - } - - if (leaf == NULL) { - DPUTS("===> inexact leaf not found, goto sibling"); - if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) - return rc; /* no entries matched */ - mp = mc->mc_pg[mc->mc_top]; - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, 0); - } - -set1: - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - if (IS_LEAF2(mp)) { - if (op == MDB_SET_RANGE || op == MDB_SET_KEY) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - } - return MDB_SUCCESS; - } - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_SET || op == MDB_SET_KEY || op == MDB_SET_RANGE) { - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - } else { - int ex2, *ex2p; - if (op == MDB_GET_BOTH) { - ex2p = &ex2; - ex2 = 0; - } else { - ex2p = NULL; - } - rc = mdb_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_SET_RANGE, ex2p); - if (rc != MDB_SUCCESS) - return rc; - } - } else if (op == MDB_GET_BOTH || op == MDB_GET_BOTH_RANGE) { - MDB_val d2; - if ((rc = mdb_node_read(mc->mc_txn, leaf, &d2)) != MDB_SUCCESS) - return rc; - rc = mc->mc_dbx->md_dcmp(data, &d2); - if (rc) { - if (op == MDB_GET_BOTH || rc > 0) - return MDB_NOTFOUND; - rc = 0; - *data = d2; - } - - } else { - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - - /* The key already matches in all other cases */ - if (op == MDB_SET_RANGE || op == MDB_SET_KEY) - MDB_GET_KEY(leaf, key); - DPRINTF(("==> cursor placed on key [%s]", DKEY(key))); - - return rc; -} - -/** Move the cursor to the first item in the database. */ -static int -mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data) -{ - int rc; - MDB_node *leaf; - - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - - if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { - rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); - if (rc != MDB_SUCCESS) - return rc; - } - mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); - - leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0); - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - mc->mc_ki[mc->mc_top] = 0; - - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->mv_size); - return MDB_SUCCESS; - } - - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc) - return rc; - } else { - if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Move the cursor to the last item in the database. 
*/ -static int -mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data) -{ - int rc; - MDB_node *leaf; - - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - - if (!(mc->mc_flags & C_EOF)) { - - if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { - rc = mdb_page_search(mc, NULL, MDB_PS_LAST); - if (rc != MDB_SUCCESS) - return rc; - } - mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); - - } - mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1; - mc->mc_flags |= C_INITIALIZED|C_EOF; - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc) - return rc; - } else { - if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -int -mdb_cursor_get(MDB_cursor *mc, MDB_val *key, MDB_val *data, - MDB_cursor_op op) -{ - int rc; - int exact = 0; - int (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data); - - if (mc == NULL) - return EINVAL; - - if (mc->mc_txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - switch (op) { - case MDB_GET_CURRENT: - if (!(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - } else { - MDB_page *mp = mc->mc_pg[mc->mc_top]; - int nkeys = NUMKEYS(mp); - if (!nkeys || mc->mc_ki[mc->mc_top] >= nkeys) { - mc->mc_ki[mc->mc_top] = nkeys; - rc = MDB_NOTFOUND; - break; - } - rc = MDB_SUCCESS; - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - } else { - MDB_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - MDB_GET_KEY(leaf, key); - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (mc->mc_flags & C_DEL) - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_GET_CURRENT); - } else { - rc = mdb_node_read(mc->mc_txn, leaf, data); - } - } - } - } - break; - case MDB_GET_BOTH: - case MDB_GET_BOTH_RANGE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (mc->mc_xcursor == NULL) { - rc = MDB_INCOMPATIBLE; - break; - } - /* FALLTHRU */ - case MDB_SET: - case MDB_SET_KEY: - case MDB_SET_RANGE: - if (key == NULL) { - rc = EINVAL; - } else { - rc = mdb_cursor_set(mc, key, data, op, - op == MDB_SET_RANGE ? 
NULL : &exact); - } - break; - case MDB_GET_MULTIPLE: - if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - rc = MDB_SUCCESS; - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) || - (mc->mc_xcursor->mx_cursor.mc_flags & C_EOF)) - break; - goto fetchm; - case MDB_NEXT_MULTIPLE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - if (!(mc->mc_flags & C_INITIALIZED)) - rc = mdb_cursor_first(mc, key, data); - else - rc = mdb_cursor_next(mc, key, data, MDB_NEXT_DUP); - if (rc == MDB_SUCCESS) { - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - MDB_cursor *mx; -fetchm: - mx = &mc->mc_xcursor->mx_cursor; - data->mv_size = NUMKEYS(mx->mc_pg[mx->mc_top]) * - mx->mc_db->md_pad; - data->mv_data = METADATA(mx->mc_pg[mx->mc_top]); - mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top])-1; - } else { - rc = MDB_NOTFOUND; - } - } - break; - case MDB_NEXT: - case MDB_NEXT_DUP: - case MDB_NEXT_NODUP: - if (!(mc->mc_flags & C_INITIALIZED)) - rc = mdb_cursor_first(mc, key, data); - else - rc = mdb_cursor_next(mc, key, data, op); - break; - case MDB_PREV: - case MDB_PREV_DUP: - case MDB_PREV_NODUP: - if (!(mc->mc_flags & C_INITIALIZED)) { - rc = mdb_cursor_last(mc, key, data); - if (rc) - break; - mc->mc_flags |= C_INITIALIZED; - mc->mc_ki[mc->mc_top]++; - } - rc = mdb_cursor_prev(mc, key, data, op); - break; - case MDB_FIRST: - rc = mdb_cursor_first(mc, key, data); - break; - case MDB_FIRST_DUP: - mfunc = mdb_cursor_first; - mmove: - if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - if (mc->mc_xcursor == NULL) { - rc = MDB_INCOMPATIBLE; - break; - } - { - MDB_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - MDB_GET_KEY(leaf, key); - if (data) - rc = mdb_node_read(mc->mc_txn, leaf, data); - break; - } - } - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL); - break; - case MDB_LAST: - rc = mdb_cursor_last(mc, key, data); - break; - case MDB_LAST_DUP: - mfunc = mdb_cursor_last; - goto mmove; - default: - DPRINTF(("unhandled/unimplemented cursor operation %u", op)); - rc = EINVAL; - break; - } - - if (mc->mc_flags & C_DEL) - mc->mc_flags ^= C_DEL; - - return rc; -} - -/** Touch all the pages in the cursor stack. Set mc_top. - * Makes sure all the pages are writable, before attempting a write operation. - * @param[in] mc The cursor to operate on. 
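(Aside: mdb_cursor_touch() above makes the whole page stack writable before a write lands; the entry point that drives all of this is mdb_cursor_put(), deleted next. A minimal caller-side write sketch, not from the patch; txn must be a write transaction:)

#include <lmdb.h>
#include <string.h>

int put_example(MDB_txn *txn, MDB_dbi dbi, const char *k, const char *v)
{
	MDB_cursor *mc;
	MDB_val key = { strlen(k), (void *)k };	/* MDB_val is {mv_size, mv_data} */
	MDB_val data = { strlen(v), (void *)v };
	int rc = mdb_cursor_open(txn, dbi, &mc);
	if (rc) return rc;
	rc = mdb_cursor_put(mc, &key, &data, 0);
	mdb_cursor_close(mc);
	return rc;
}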
- */ -static int -mdb_cursor_touch(MDB_cursor *mc) -{ - int rc = MDB_SUCCESS; - - if (mc->mc_dbi > MAIN_DBI && !(*mc->mc_dbflag & DB_DIRTY)) { - MDB_cursor mc2; - MDB_xcursor mcx; - if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) - return MDB_BAD_DBI; - mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx); - rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, MDB_PS_MODIFY); - if (rc) - return rc; - *mc->mc_dbflag |= DB_DIRTY; - } - mc->mc_top = 0; - if (mc->mc_snum) { - do { - rc = mdb_page_touch(mc); - } while (!rc && ++(mc->mc_top) < mc->mc_snum); - mc->mc_top = mc->mc_snum-1; - } - return rc; -} - -/** Do not spill pages to disk if txn is getting full, may fail instead */ -#define MDB_NOSPILL 0x8000 - -int -mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, - unsigned int flags) -{ - enum { MDB_NO_ROOT = MDB_LAST_ERRCODE+10 }; /* internal code */ - MDB_env *env; - MDB_node *leaf = NULL; - MDB_page *fp, *mp; - uint16_t fp_flags; - MDB_val xdata, *rdata, dkey, olddata; - MDB_db dummy; - int do_sub = 0, insert_key, insert_data; - unsigned int mcount = 0, dcount = 0, nospill; - size_t nsize; - int rc, rc2; - unsigned int nflags; - DKBUF; - - if (mc == NULL || key == NULL) - return EINVAL; - - env = mc->mc_txn->mt_env; - - /* Check this first so counter will always be zero on any - * early failures. - */ - if (flags & MDB_MULTIPLE) { - dcount = data[1].mv_size; - data[1].mv_size = 0; - if (!F_ISSET(mc->mc_db->md_flags, MDB_DUPFIXED)) - return MDB_INCOMPATIBLE; - } - - nospill = flags & MDB_NOSPILL; - flags &= ~MDB_NOSPILL; - - if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_ERROR)) - return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - if (key->mv_size-1 >= ENV_MAXKEY(env)) - return MDB_BAD_VALSIZE; - -#if SIZE_MAX > MAXDATASIZE - if (data->mv_size > ((mc->mc_db->md_flags & MDB_DUPSORT) ? ENV_MAXKEY(env) : MAXDATASIZE)) - return MDB_BAD_VALSIZE; -#else - if ((mc->mc_db->md_flags & MDB_DUPSORT) && data->mv_size > ENV_MAXKEY(env)) - return MDB_BAD_VALSIZE; -#endif - - DPRINTF(("==> put db %d key [%s], size %"Z"u, data size %"Z"u", - DDBI(mc), DKEY(key), key ? 
key->mv_size : 0, data->mv_size)); - - dkey.mv_size = 0; - - if (flags == MDB_CURRENT) { - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - rc = MDB_SUCCESS; - } else if (mc->mc_db->md_root == P_INVALID) { - /* new database, cursor has nothing to point to */ - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_flags &= ~C_INITIALIZED; - rc = MDB_NO_ROOT; - } else { - int exact = 0; - MDB_val d2; - if (flags & MDB_APPEND) { - MDB_val k2; - rc = mdb_cursor_last(mc, &k2, &d2); - if (rc == 0) { - rc = mc->mc_dbx->md_cmp(key, &k2); - if (rc > 0) { - rc = MDB_NOTFOUND; - mc->mc_ki[mc->mc_top]++; - } else { - /* new key is <= last key */ - rc = MDB_KEYEXIST; - } - } - } else { - rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact); - } - if ((flags & MDB_NOOVERWRITE) && rc == 0) { - DPRINTF(("duplicate key [%s]", DKEY(key))); - *data = d2; - return MDB_KEYEXIST; - } - if (rc && rc != MDB_NOTFOUND) - return rc; - } - - if (mc->mc_flags & C_DEL) - mc->mc_flags ^= C_DEL; - - /* Cursor is positioned, check for room in the dirty list */ - if (!nospill) { - if (flags & MDB_MULTIPLE) { - rdata = &xdata; - xdata.mv_size = data->mv_size * dcount; - } else { - rdata = data; - } - if ((rc2 = mdb_page_spill(mc, key, rdata))) - return rc2; - } - - if (rc == MDB_NO_ROOT) { - MDB_page *np; - /* new database, write a root leaf page */ - DPUTS("allocating new root leaf page"); - if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) { - return rc2; - } - mdb_cursor_push(mc, np); - mc->mc_db->md_root = np->mp_pgno; - mc->mc_db->md_depth++; - *mc->mc_dbflag |= DB_DIRTY; - if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED)) - == MDB_DUPFIXED) - np->mp_flags |= P_LEAF2; - mc->mc_flags |= C_INITIALIZED; - } else { - /* make sure all cursor pages are writable */ - rc2 = mdb_cursor_touch(mc); - if (rc2) - return rc2; - } - - insert_key = insert_data = rc; - if (insert_key) { - /* The key does not exist */ - DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top])); - if ((mc->mc_db->md_flags & MDB_DUPSORT) && - LEAFSIZE(key, data) > env->me_nodemax) - { - /* Too big for a node, insert in sub-DB. Set up an empty - * "old sub-page" for prep_subDB to expand to a full page. - */ - fp_flags = P_LEAF|P_DIRTY; - fp = env->me_pbuf; - fp->mp_pad = data->mv_size; /* used if MDB_DUPFIXED */ - fp->mp_lower = fp->mp_upper = (PAGEHDRSZ-PAGEBASE); - olddata.mv_size = PAGEHDRSZ; - goto prep_subDB; - } - } else { - /* there's only a key anyway, so this is a no-op */ - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - char *ptr; - unsigned int ksize = mc->mc_db->md_pad; - if (key->mv_size != ksize) - return MDB_BAD_VALSIZE; - ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize); - memcpy(ptr, key->mv_data, ksize); -fix_parent: - /* if overwriting slot 0 of leaf, need to - * update branch key if there is a parent page - */ - if (mc->mc_top && !mc->mc_ki[mc->mc_top]) { - unsigned short top = mc->mc_top; - mc->mc_top--; - /* slot 0 is always an empty key, find real slot */ - while (mc->mc_top && !mc->mc_ki[mc->mc_top]) - mc->mc_top--; - if (mc->mc_ki[mc->mc_top]) - rc2 = mdb_update_key(mc, key); - else - rc2 = MDB_SUCCESS; - mc->mc_top = top; - if (rc2) - return rc2; - } - return MDB_SUCCESS; - } - -more: - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - olddata.mv_size = NODEDSZ(leaf); - olddata.mv_data = NODEDATA(leaf); - - /* DB has dups? */ - if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) { - /* Prepare (sub-)page/sub-DB to accept the new item, - * if needed. fp: old sub-page or a header faking - * it. 
mp: new (sub-)page. offset: growth in page - * size. xdata: node data with new page or DB. - */ - unsigned i, offset = 0; - mp = fp = xdata.mv_data = env->me_pbuf; - mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno; - - /* Was a single item before, must convert now */ - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - /* Just overwrite the current item */ - if (flags == MDB_CURRENT) - goto current; - -#if UINT_MAX < SIZE_MAX - if (mc->mc_dbx->md_dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t)) - mc->mc_dbx->md_dcmp = mdb_cmp_clong; -#endif - /* does data match? */ - if (!mc->mc_dbx->md_dcmp(data, &olddata)) { - if (flags & MDB_NODUPDATA) - return MDB_KEYEXIST; - /* overwrite it */ - goto current; - } - - /* Back up original data item */ - dkey.mv_size = olddata.mv_size; - dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size); - - /* Make sub-page header for the dup items, with dummy body */ - fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP; - fp->mp_lower = (PAGEHDRSZ-PAGEBASE); - xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size; - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - fp->mp_flags |= P_LEAF2; - fp->mp_pad = data->mv_size; - xdata.mv_size += 2 * data->mv_size; /* leave space for 2 more */ - } else { - xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) + - (dkey.mv_size & 1) + (data->mv_size & 1); - } - fp->mp_upper = xdata.mv_size - PAGEBASE; - olddata.mv_size = xdata.mv_size; /* pretend olddata is fp */ - } else if (leaf->mn_flags & F_SUBDATA) { - /* Data is on sub-DB, just store it */ - flags |= F_DUPDATA|F_SUBDATA; - goto put_sub; - } else { - /* Data is on sub-page */ - fp = olddata.mv_data; - switch (flags) { - default: - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - offset = EVEN(NODESIZE + sizeof(indx_t) + - data->mv_size); - break; - } - offset = fp->mp_pad; - if (SIZELEFT(fp) < offset) { - offset *= 4; /* space for 4 more */ - break; - } - /* FALLTHRU: Big enough MDB_DUPFIXED sub-page */ - case MDB_CURRENT: - fp->mp_flags |= P_DIRTY; - COPY_PGNO(fp->mp_pgno, mp->mp_pgno); - mc->mc_xcursor->mx_cursor.mc_pg[0] = fp; - flags |= F_DUPDATA; - goto put_sub; - } - xdata.mv_size = olddata.mv_size + offset; - } - - fp_flags = fp->mp_flags; - if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) { - /* Too big for a sub-page, convert to sub-DB */ - fp_flags &= ~P_SUBP; -prep_subDB: - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - fp_flags |= P_LEAF2; - dummy.md_pad = fp->mp_pad; - dummy.md_flags = MDB_DUPFIXED; - if (mc->mc_db->md_flags & MDB_INTEGERDUP) - dummy.md_flags |= MDB_INTEGERKEY; - } else { - dummy.md_pad = 0; - dummy.md_flags = 0; - } - dummy.md_depth = 1; - dummy.md_branch_pages = 0; - dummy.md_leaf_pages = 1; - dummy.md_overflow_pages = 0; - dummy.md_entries = NUMKEYS(fp); - xdata.mv_size = sizeof(MDB_db); - xdata.mv_data = &dummy; - if ((rc = mdb_page_alloc(mc, 1, &mp))) - return rc; - offset = env->me_psize - olddata.mv_size; - flags |= F_DUPDATA|F_SUBDATA; - dummy.md_root = mp->mp_pgno; - } - if (mp != fp) { - mp->mp_flags = fp_flags | P_DIRTY; - mp->mp_pad = fp->mp_pad; - mp->mp_lower = fp->mp_lower; - mp->mp_upper = fp->mp_upper + offset; - if (fp_flags & P_LEAF2) { - memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad); - } else { - memcpy((char *)mp + mp->mp_upper + PAGEBASE, (char *)fp + fp->mp_upper + PAGEBASE, - olddata.mv_size - fp->mp_upper - PAGEBASE); - for (i=0; i<NUMKEYS(fp); i++) - mp->mp_ptrs[i] = fp->mp_ptrs[i] + offset; - } - } - - rdata = &xdata; - flags |= F_DUPDATA; - do_sub = 1; - if (!insert_key) - mdb_node_del(mc, 0); - goto new_sub; - } -current: -
/* overflow page overwrites need special handling */ - if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { - MDB_page *omp; - pgno_t pg; - int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize); - - memcpy(&pg, olddata.mv_data, sizeof(pg)); - if ((rc2 = mdb_page_get(mc->mc_txn, pg, &omp, &level)) != 0) - return rc2; - ovpages = omp->mp_pages; - - /* Is the ov page large enough? */ - if (ovpages >= dpages) { - if (!(omp->mp_flags & P_DIRTY) && - (level || (env->me_flags & MDB_WRITEMAP))) - { - rc = mdb_page_unspill(mc->mc_txn, omp, &omp); - if (rc) - return rc; - level = 0; /* dirty in this txn or clean */ - } - /* Is it dirty? */ - if (omp->mp_flags & P_DIRTY) { - /* yes, overwrite it. Note in this case we don't - * bother to try shrinking the page if the new data - * is smaller than the overflow threshold. - */ - if (level > 1) { - /* It is writable only in a parent txn */ - size_t sz = (size_t) env->me_psize * ovpages, off; - MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages); - MDB_ID2 id2; - if (!np) - return ENOMEM; - id2.mid = pg; - id2.mptr = np; - rc2 = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2); - mdb_cassert(mc, rc2 == 0); - if (!(flags & MDB_RESERVE)) { - /* Copy end of page, adjusting alignment so - * compiler may copy words instead of bytes. - */ - off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t); - memcpy((size_t *)((char *)np + off), - (size_t *)((char *)omp + off), sz - off); - sz = PAGEHDRSZ; - } - memcpy(np, omp, sz); /* Copy beginning of page */ - omp = np; - } - SETDSZ(leaf, data->mv_size); - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = METADATA(omp); - else - memcpy(METADATA(omp), data->mv_data, data->mv_size); - return MDB_SUCCESS; - } - } - if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS) - return rc2; - } else if (data->mv_size == olddata.mv_size) { - /* same size, just replace it. Note that we could - * also reuse this node if the new data is smaller, - * but instead we opt to shrink the node in that case. - */ - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = olddata.mv_data; - else if (!(mc->mc_flags & C_SUB)) - memcpy(olddata.mv_data, data->mv_data, data->mv_size); - else { - memcpy(NODEKEY(leaf), key->mv_data, key->mv_size); - goto fix_parent; - } - return MDB_SUCCESS; - } - mdb_node_del(mc, 0); - } - - rdata = data; - -new_sub: - nflags = flags & NODE_ADD_FLAGS; - nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata); - if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) { - if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA ) - nflags &= ~MDB_APPEND; /* sub-page may need room to grow */ - if (!insert_key) - nflags |= MDB_SPLIT_REPLACE; - rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags); - } else { - /* There is room already in this leaf page. */ - rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags); - if (rc == 0 && insert_key) { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - unsigned i = mc->mc_top; - MDB_page *mp = mc->mc_pg[i]; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc || m3->mc_snum < mc->mc_snum) continue; - if (m3->mc_pg[i] == mp && m3->mc_ki[i] >= mc->mc_ki[i]) { - m3->mc_ki[i]++; - } - } - } - } - - if (rc == MDB_SUCCESS) { - /* Now store the actual data in the child DB. Note that we're - * storing the user data in the keys field, so there are strict - * size limits on dupdata. 
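(Aside: as the comment above notes, duplicate values are stored as keys of a child DB whose entries carry zero-size data. From the caller's side that convention is invisible; a sketch, not from the patch:)

#include <lmdb.h>

/* On an MDB_DUPSORT database, the same key may hold many values.
 * MDB_NODUPDATA makes the put fail with MDB_KEYEXIST if this exact
 * key/value pair is already present. */
int put_dup(MDB_cursor *mc, MDB_val *key, MDB_val *val)
{
	return mdb_cursor_put(mc, key, val, MDB_NODUPDATA);
}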
The actual data fields of the child - * DB are all zero size. - */ - if (do_sub) { - int xflags; - size_t ecount; -put_sub: - xdata.mv_size = 0; - xdata.mv_data = ""; - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (flags & MDB_CURRENT) { - xflags = MDB_CURRENT|MDB_NOSPILL; - } else { - mdb_xcursor_init1(mc, leaf); - xflags = (flags & MDB_NODUPDATA) ? - MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL; - } - /* converted, write the original data first */ - if (dkey.mv_size) { - rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags); - if (rc) - goto bad_sub; - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2; - unsigned i = mc->mc_top; - MDB_page *mp = mc->mc_pg[i]; - - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; - if (!(m2->mc_flags & C_INITIALIZED)) continue; - if (m2->mc_pg[i] == mp && m2->mc_ki[i] == mc->mc_ki[i]) { - mdb_xcursor_init1(m2, leaf); - } - } - } - /* we've done our job */ - dkey.mv_size = 0; - } - ecount = mc->mc_xcursor->mx_db.md_entries; - if (flags & MDB_APPENDDUP) - xflags |= MDB_APPEND; - rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags); - if (flags & F_SUBDATA) { - void *db = NODEDATA(leaf); - memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); - } - insert_data = mc->mc_xcursor->mx_db.md_entries - ecount; - } - /* Increment count unless we just replaced an existing item. */ - if (insert_data) - mc->mc_db->md_entries++; - if (insert_key) { - /* Invalidate txn if we created an empty sub-DB */ - if (rc) - goto bad_sub; - /* If we succeeded and the key didn't exist before, - * make sure the cursor is marked valid. - */ - mc->mc_flags |= C_INITIALIZED; - } - if (flags & MDB_MULTIPLE) { - if (!rc) { - mcount++; - /* let caller know how many succeeded, if any */ - data[1].mv_size = mcount; - if (mcount < dcount) { - data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size; - insert_key = insert_data = 0; - goto more; - } - } - } - return rc; -bad_sub: - if (rc == MDB_KEYEXIST) /* should not happen, we deleted that item */ - rc = MDB_CORRUPTED; - } - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_cursor_del(MDB_cursor *mc, unsigned int flags) -{ - MDB_node *leaf; - MDB_page *mp; - int rc; - - if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_ERROR)) - return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? 
EACCES : MDB_BAD_TXN; - - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - - if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) - return MDB_NOTFOUND; - - if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL))) - return rc; - - rc = mdb_cursor_touch(mc); - if (rc) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - if (IS_LEAF2(mp)) - goto del_key; - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (flags & MDB_NODUPDATA) { - /* mdb_cursor_del0() will subtract the final entry */ - mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1; - } else { - if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) { - mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } - rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL); - if (rc) - return rc; - /* If sub-DB still has entries, we're done */ - if (mc->mc_xcursor->mx_db.md_entries) { - if (leaf->mn_flags & F_SUBDATA) { - /* update subDB info */ - void *db = NODEDATA(leaf); - memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); - } else { - MDB_cursor *m2; - /* shrink fake page */ - mdb_node_shrink(mp, mc->mc_ki[mc->mc_top]); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - /* fix other sub-DB cursors pointed at this fake page */ - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; - if (m2->mc_pg[mc->mc_top] == mp && - m2->mc_ki[mc->mc_top] == mc->mc_ki[mc->mc_top]) - m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } - } - mc->mc_db->md_entries--; - mc->mc_flags |= C_DEL; - return rc; - } - /* otherwise fall thru and delete the sub-DB */ - } - - if (leaf->mn_flags & F_SUBDATA) { - /* add all the child DB's pages to the free list */ - rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); - if (rc) - goto fail; - } - } - - /* add overflow pages to free list */ - if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { - MDB_page *omp; - pgno_t pg; - - memcpy(&pg, NODEDATA(leaf), sizeof(pg)); - if ((rc = mdb_page_get(mc->mc_txn, pg, &omp, NULL)) || - (rc = mdb_ovpage_free(mc, omp))) - goto fail; - } - -del_key: - return mdb_cursor_del0(mc); - -fail: - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -/** Allocate and initialize new pages for a database. - * @param[in] mc a cursor on the database being added to. - * @param[in] flags flags defining what type of page is being allocated. - * @param[in] num the number of pages to allocate. This is usually 1, - * unless allocating overflow pages for a large record. - * @param[out] mp Address of a page, or NULL on failure. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp) -{ - MDB_page *np; - int rc; - - if ((rc = mdb_page_alloc(mc, num, &np))) - return rc; - DPRINTF(("allocated new mpage %"Z"u, page size %u", - np->mp_pgno, mc->mc_txn->mt_env->me_psize)); - np->mp_flags = flags | P_DIRTY; - np->mp_lower = (PAGEHDRSZ-PAGEBASE); - np->mp_upper = mc->mc_txn->mt_env->me_psize - PAGEBASE; - - if (IS_BRANCH(np)) - mc->mc_db->md_branch_pages++; - else if (IS_LEAF(np)) - mc->mc_db->md_leaf_pages++; - else if (IS_OVERFLOW(np)) { - mc->mc_db->md_overflow_pages += num; - np->mp_pages = num; - } - *mp = np; - - return 0; -} - -/** Calculate the size of a leaf node. - * The size depends on the environment's page size; if a data item - * is too large it will be put onto an overflow page and the node - * size will only include the key and not the data. 
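(Aside: a worked version of the sizing rule described above. The node-header size, max-node threshold, and page-number width are passed in as assumed parameters instead of LMDB's real NODESIZE/me_nodemax/pgno_t macros; indx_t is taken to be a 2-byte slot index:)

#include <stddef.h>

#define EVEN_SZ(n)	(((n) + 1U) & ~1U)	/* round up for 2-byte alignment */

static size_t leaf_size_sketch(size_t node_hdr, size_t ksize, size_t dsize,
	size_t nodemax, size_t pgno_size)
{
	size_t sz = node_hdr + ksize + dsize;
	if (sz > nodemax)	/* too big: data moves to an overflow page */
		sz -= dsize - pgno_size;	/* node keeps only a page-number ref */
	return EVEN_SZ(sz + sizeof(unsigned short));	/* plus the slot entry */
}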
Sizes are always - * rounded up to an even number of bytes, to guarantee 2-byte alignment - * of the #MDB_node headers. - * @param[in] env The environment handle. - * @param[in] key The key for the node. - * @param[in] data The data for the node. - * @return The number of bytes needed to store the node. - */ -static size_t -mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data) -{ - size_t sz; - - sz = LEAFSIZE(key, data); - if (sz > env->me_nodemax) { - /* put on overflow page */ - sz -= data->mv_size - sizeof(pgno_t); - } - - return EVEN(sz + sizeof(indx_t)); -} - -/** Calculate the size of a branch node. - * The size should depend on the environment's page size but since - * we currently don't support spilling large keys onto overflow - * pages, it's simply the size of the #MDB_node header plus the - * size of the key. Sizes are always rounded up to an even number - * of bytes, to guarantee 2-byte alignment of the #MDB_node headers. - * @param[in] env The environment handle. - * @param[in] key The key for the node. - * @return The number of bytes needed to store the node. - */ -static size_t -mdb_branch_size(MDB_env *env, MDB_val *key) -{ - size_t sz; - - sz = INDXSIZE(key); - if (sz > env->me_nodemax) { - /* put on overflow page */ - /* not implemented */ - /* sz -= key->size - sizeof(pgno_t); */ - } - - return sz + sizeof(indx_t); -} - -/** Add a node to the page pointed to by the cursor. - * @param[in] mc The cursor for this operation. - * @param[in] indx The index on the page where the new node should be added. - * @param[in] key The key for the new node. - * @param[in] data The data for the new node, if any. - * @param[in] pgno The page number, if adding a branch node. - * @param[in] flags Flags for the node. - * @return 0 on success, non-zero on failure. Possible errors are: - *
- * <ul>
- *	<li>ENOMEM - failed to allocate overflow pages for the node.
- *	<li>MDB_PAGE_FULL - there is insufficient room in the page. This error
- *	should never happen since all callers already calculate the
- *	page's free space before calling this function.
- * </ul>
- */ -static int -mdb_node_add(MDB_cursor *mc, indx_t indx, - MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags) -{ - unsigned int i; - size_t node_size = NODESIZE; - ssize_t room; - indx_t ofs; - MDB_node *node; - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_page *ofp = NULL; /* overflow page */ - DKBUF; - - mdb_cassert(mc, mp->mp_upper >= mp->mp_lower); - - DPRINTF(("add to %s %spage %"Z"u index %i, data size %"Z"u key size %"Z"u [%s]", - IS_LEAF(mp) ? "leaf" : "branch", - IS_SUBP(mp) ? "sub-" : "", - mdb_dbg_pgno(mp), indx, data ? data->mv_size : 0, - key ? key->mv_size : 0, key ? DKEY(key) : "null")); - - if (IS_LEAF2(mp)) { - /* Move higher keys up one slot. */ - int ksize = mc->mc_db->md_pad, dif; - char *ptr = LEAF2KEY(mp, indx, ksize); - dif = NUMKEYS(mp) - indx; - if (dif > 0) - memmove(ptr+ksize, ptr, dif*ksize); - /* insert new key */ - memcpy(ptr, key->mv_data, ksize); - - /* Just using these for counting */ - mp->mp_lower += sizeof(indx_t); - mp->mp_upper -= ksize - sizeof(indx_t); - return MDB_SUCCESS; - } - - room = (ssize_t)SIZELEFT(mp) - (ssize_t)sizeof(indx_t); - if (key != NULL) - node_size += key->mv_size; - if (IS_LEAF(mp)) { - mdb_cassert(mc, data); - if (F_ISSET(flags, F_BIGDATA)) { - /* Data already on overflow page. */ - node_size += sizeof(pgno_t); - } else if (node_size + data->mv_size > mc->mc_txn->mt_env->me_nodemax) { - int ovpages = OVPAGES(data->mv_size, mc->mc_txn->mt_env->me_psize); - int rc; - /* Put data on overflow page. */ - DPRINTF(("data size is %"Z"u, node would be %"Z"u, put data on overflow page", - data->mv_size, node_size+data->mv_size)); - node_size = EVEN(node_size + sizeof(pgno_t)); - if ((ssize_t)node_size > room) - goto full; - if ((rc = mdb_page_new(mc, P_OVERFLOW, ovpages, &ofp))) - return rc; - DPRINTF(("allocated overflow page %"Z"u", ofp->mp_pgno)); - flags |= F_BIGDATA; - goto update; - } else { - node_size += data->mv_size; - } - } - node_size = EVEN(node_size); - if ((ssize_t)node_size > room) - goto full; - -update: - /* Move higher pointers up one slot. */ - for (i = NUMKEYS(mp); i > indx; i--) - mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; - - /* Adjust free space offsets. */ - ofs = mp->mp_upper - node_size; - mdb_cassert(mc, ofs >= mp->mp_lower + sizeof(indx_t)); - mp->mp_ptrs[indx] = ofs; - mp->mp_upper = ofs; - mp->mp_lower += sizeof(indx_t); - - /* Write the node data. */ - node = NODEPTR(mp, indx); - node->mn_ksize = (key == NULL) ? 
0 : key->mv_size; - node->mn_flags = flags; - if (IS_LEAF(mp)) - SETDSZ(node,data->mv_size); - else - SETPGNO(node,pgno); - - if (key) - memcpy(NODEKEY(node), key->mv_data, key->mv_size); - - if (IS_LEAF(mp)) { - mdb_cassert(mc, key); - if (ofp == NULL) { - if (F_ISSET(flags, F_BIGDATA)) - memcpy(node->mn_data + key->mv_size, data->mv_data, - sizeof(pgno_t)); - else if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = node->mn_data + key->mv_size; - else - memcpy(node->mn_data + key->mv_size, data->mv_data, - data->mv_size); - } else { - memcpy(node->mn_data + key->mv_size, &ofp->mp_pgno, - sizeof(pgno_t)); - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = METADATA(ofp); - else - memcpy(METADATA(ofp), data->mv_data, data->mv_size); - } - } - - return MDB_SUCCESS; - -full: - DPRINTF(("not enough room in page %"Z"u, got %u ptrs", - mdb_dbg_pgno(mp), NUMKEYS(mp))); - DPRINTF(("upper-lower = %u - %u = %"Z"d", mp->mp_upper,mp->mp_lower,room)); - DPRINTF(("node size = %"Z"u", node_size)); - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PAGE_FULL; -} - -/** Delete the specified node from a page. - * @param[in] mc Cursor pointing to the node to delete. - * @param[in] ksize The size of a node. Only used if the page is - * part of a #MDB_DUPFIXED database. - */ -static void -mdb_node_del(MDB_cursor *mc, int ksize) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - indx_t indx = mc->mc_ki[mc->mc_top]; - unsigned int sz; - indx_t i, j, numkeys, ptr; - MDB_node *node; - char *base; - - DPRINTF(("delete node %u on %s page %"Z"u", indx, - IS_LEAF(mp) ? "leaf" : "branch", mdb_dbg_pgno(mp))); - numkeys = NUMKEYS(mp); - mdb_cassert(mc, indx < numkeys); - - if (IS_LEAF2(mp)) { - int x = numkeys - 1 - indx; - base = LEAF2KEY(mp, indx, ksize); - if (x) - memmove(base, base + ksize, x * ksize); - mp->mp_lower -= sizeof(indx_t); - mp->mp_upper += ksize - sizeof(indx_t); - return; - } - - node = NODEPTR(mp, indx); - sz = NODESIZE + node->mn_ksize; - if (IS_LEAF(mp)) { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - sz += sizeof(pgno_t); - else - sz += NODEDSZ(node); - } - sz = EVEN(sz); - - ptr = mp->mp_ptrs[indx]; - for (i = j = 0; i < numkeys; i++) { - if (i != indx) { - mp->mp_ptrs[j] = mp->mp_ptrs[i]; - if (mp->mp_ptrs[i] < ptr) - mp->mp_ptrs[j] += sz; - j++; - } - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - memmove(base + sz, base, ptr - mp->mp_upper); - - mp->mp_lower -= sizeof(indx_t); - mp->mp_upper += sz; -} - -/** Compact the main page after deleting a node on a subpage. - * @param[in] mp The main page to operate on. - * @param[in] indx The index of the subpage on the main page. 
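(Aside: mdb_node_del() above compacts the page heap after removing a node. A toy model of that compaction over a simplified page layout, not LMDB's real structures: the heap grows down from `upper`, and every slot offset below the removed node shifts up by the node's size:)

#include <string.h>

static void del_from_heap(char *page, unsigned short *ptrs, unsigned nkeys,
	unsigned idx, unsigned short len, unsigned short *upper)
{
	unsigned short off = ptrs[idx];	/* heap offset of the doomed node */
	/* Squeeze the slot array shut, fixing offsets that pointed below. */
	for (unsigned i = 0, j = 0; i < nkeys; i++) {
		if (i == idx) continue;
		ptrs[j++] = ptrs[i] < off ? ptrs[i] + len : ptrs[i];
	}
	/* Slide the region [upper, off) up over the freed bytes. */
	memmove(page + *upper + len, page + *upper, off - *upper);
	*upper += len;
}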
- */ -static void -mdb_node_shrink(MDB_page *mp, indx_t indx) -{ - MDB_node *node; - MDB_page *sp, *xp; - char *base; - int nsize, delta; - indx_t i, numkeys, ptr; - - node = NODEPTR(mp, indx); - sp = (MDB_page *)NODEDATA(node); - delta = SIZELEFT(sp); - xp = (MDB_page *)((char *)sp + delta); - - /* shift subpage upward */ - if (IS_LEAF2(sp)) { - nsize = NUMKEYS(sp) * sp->mp_pad; - if (nsize & 1) - return; /* do not make the node uneven-sized */ - memmove(METADATA(xp), METADATA(sp), nsize); - } else { - int i; - numkeys = NUMKEYS(sp); - for (i=numkeys-1; i>=0; i--) - xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta; - } - xp->mp_upper = sp->mp_lower; - xp->mp_lower = sp->mp_lower; - xp->mp_flags = sp->mp_flags; - xp->mp_pad = sp->mp_pad; - COPY_PGNO(xp->mp_pgno, mp->mp_pgno); - - nsize = NODEDSZ(node) - delta; - SETDSZ(node, nsize); - - /* shift lower nodes upward */ - ptr = mp->mp_ptrs[indx]; - numkeys = NUMKEYS(mp); - for (i = 0; i < numkeys; i++) { - if (mp->mp_ptrs[i] <= ptr) - mp->mp_ptrs[i] += delta; - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - memmove(base + delta, base, ptr - mp->mp_upper + NODESIZE + NODEKSZ(node)); - mp->mp_upper += delta; -} - -/** Initial setup of a sorted-dups cursor. - * Sorted duplicates are implemented as a sub-database for the given key. - * The duplicate data items are actually keys of the sub-database. - * Operations on the duplicate data items are performed using a sub-cursor - * initialized when the sub-database is first accessed. This function does - * the preliminary setup of the sub-cursor, filling in the fields that - * depend only on the parent DB. - * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. - */ -static void -mdb_xcursor_init0(MDB_cursor *mc) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - mx->mx_cursor.mc_xcursor = NULL; - mx->mx_cursor.mc_txn = mc->mc_txn; - mx->mx_cursor.mc_db = &mx->mx_db; - mx->mx_cursor.mc_dbx = &mx->mx_dbx; - mx->mx_cursor.mc_dbi = mc->mc_dbi; - mx->mx_cursor.mc_dbflag = &mx->mx_dbflag; - mx->mx_cursor.mc_snum = 0; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags = C_SUB; - mx->mx_dbx.md_name.mv_size = 0; - mx->mx_dbx.md_name.mv_data = NULL; - mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp; - mx->mx_dbx.md_dcmp = NULL; - mx->mx_dbx.md_rel = mc->mc_dbx->md_rel; -} - -/** Final setup of a sorted-dups cursor. - * Sets up the fields that depend on the data from the main cursor. - * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. - * @param[in] node The data containing the #MDB_db record for the - * sorted-dup database. 
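Since sorted duplicates are keys of a hidden sub-database, application code never touches the sub-cursor that mdb_xcursor_init0() prepares; it only sees the ordinary cursor API. A minimal usage sketch against the public LMDB interface, assuming txn and dbi refer to a database opened with MDB_DUPSORT:

/* Hedged sketch: iterate every duplicate stored under one key. */
#include <lmdb.h>

static int walk_dups(MDB_txn *txn, MDB_dbi dbi)
{
    MDB_cursor *cur;
    MDB_val key = { 5, "hello" }, data;
    int rc = mdb_cursor_open(txn, dbi, &cur);
    if (rc != MDB_SUCCESS)
        return rc;
    /* position on the key; data becomes the first duplicate */
    rc = mdb_cursor_get(cur, &key, &data, MDB_SET_KEY);
    while (rc == MDB_SUCCESS) {
        /* the hidden sub-cursor walks the sub-DB on each step */
        rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT_DUP);
    }
    mdb_cursor_close(cur);
    return rc == MDB_NOTFOUND ? MDB_SUCCESS : rc;
}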
- */ -static void -mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - if (node->mn_flags & F_SUBDATA) { - memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDB_db)); - mx->mx_cursor.mc_pg[0] = 0; - mx->mx_cursor.mc_snum = 0; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags = C_SUB; - } else { - MDB_page *fp = NODEDATA(node); - mx->mx_db.md_pad = mc->mc_pg[mc->mc_top]->mp_pad; - mx->mx_db.md_flags = 0; - mx->mx_db.md_depth = 1; - mx->mx_db.md_branch_pages = 0; - mx->mx_db.md_leaf_pages = 1; - mx->mx_db.md_overflow_pages = 0; - mx->mx_db.md_entries = NUMKEYS(fp); - COPY_PGNO(mx->mx_db.md_root, fp->mp_pgno); - mx->mx_cursor.mc_snum = 1; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags = C_INITIALIZED|C_SUB; - mx->mx_cursor.mc_pg[0] = fp; - mx->mx_cursor.mc_ki[0] = 0; - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - mx->mx_db.md_flags = MDB_DUPFIXED; - mx->mx_db.md_pad = fp->mp_pad; - if (mc->mc_db->md_flags & MDB_INTEGERDUP) - mx->mx_db.md_flags |= MDB_INTEGERKEY; - } - } - DPRINTF(("Sub-db -%u root page %"Z"u", mx->mx_cursor.mc_dbi, - mx->mx_db.md_root)); - mx->mx_dbflag = DB_VALID|DB_DIRTY; /* DB_DIRTY guides mdb_cursor_touch */ -#if UINT_MAX < SIZE_MAX - if (mx->mx_dbx.md_cmp == mdb_cmp_int && mx->mx_db.md_pad == sizeof(size_t)) - mx->mx_dbx.md_cmp = mdb_cmp_clong; -#endif -} - -/** Initialize a cursor for a given transaction and database. */ -static void -mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx) -{ - mc->mc_next = NULL; - mc->mc_backup = NULL; - mc->mc_dbi = dbi; - mc->mc_txn = txn; - mc->mc_db = &txn->mt_dbs[dbi]; - mc->mc_dbx = &txn->mt_dbxs[dbi]; - mc->mc_dbflag = &txn->mt_dbflags[dbi]; - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_pg[0] = 0; - mc->mc_flags = 0; - if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) { - mdb_tassert(txn, mx != NULL); - mc->mc_xcursor = mx; - mdb_xcursor_init0(mc); - } else { - mc->mc_xcursor = NULL; - } - if (*mc->mc_dbflag & DB_STALE) { - mdb_page_search(mc, NULL, MDB_PS_ROOTONLY); - } -} - -int -mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret) -{ - MDB_cursor *mc; - size_t size = sizeof(MDB_cursor); - - if (!ret || !TXN_DBI_EXIST(txn, dbi)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - /* Allow read access to the freelist */ - if (!dbi && !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) - return EINVAL; - - if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) - size += sizeof(MDB_xcursor); - - if ((mc = malloc(size)) != NULL) { - mdb_cursor_init(mc, txn, dbi, (MDB_xcursor *)(mc + 1)); - if (txn->mt_cursors) { - mc->mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = mc; - mc->mc_flags |= C_UNTRACK; - } - } else { - return ENOMEM; - } - - *ret = mc; - - return MDB_SUCCESS; -} - -int -mdb_cursor_renew(MDB_txn *txn, MDB_cursor *mc) -{ - if (!mc || !TXN_DBI_EXIST(txn, mc->mc_dbi)) - return EINVAL; - - if ((mc->mc_flags & C_UNTRACK) || txn->mt_cursors) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor); - return MDB_SUCCESS; -} - -/* Return the count of duplicate data items for the current key */ -int -mdb_cursor_count(MDB_cursor *mc, size_t *countp) -{ - MDB_node *leaf; - - if (mc == NULL || countp == NULL) - return EINVAL; - - if (mc->mc_xcursor == NULL) - return MDB_INCOMPATIBLE; - - if (mc->mc_txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - - if (!mc->mc_snum || (mc->mc_flags & C_EOF)) - return 
MDB_NOTFOUND; - - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - *countp = 1; - } else { - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) - return EINVAL; - - *countp = mc->mc_xcursor->mx_db.md_entries; - } - return MDB_SUCCESS; -} - -void -mdb_cursor_close(MDB_cursor *mc) -{ - if (mc && !mc->mc_backup) { - /* remove from txn, if tracked */ - if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) { - MDB_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi]; - while (*prev && *prev != mc) prev = &(*prev)->mc_next; - if (*prev == mc) - *prev = mc->mc_next; - } - free(mc); - } -} - -MDB_txn * -mdb_cursor_txn(MDB_cursor *mc) -{ - if (!mc) return NULL; - return mc->mc_txn; -} - -MDB_dbi -mdb_cursor_dbi(MDB_cursor *mc) -{ - return mc->mc_dbi; -} - -/** Replace the key for a branch node with a new key. - * @param[in] mc Cursor pointing to the node to operate on. - * @param[in] key The new key to use. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_update_key(MDB_cursor *mc, MDB_val *key) -{ - MDB_page *mp; - MDB_node *node; - char *base; - size_t len; - int delta, ksize, oksize; - indx_t ptr, i, numkeys, indx; - DKBUF; - - indx = mc->mc_ki[mc->mc_top]; - mp = mc->mc_pg[mc->mc_top]; - node = NODEPTR(mp, indx); - ptr = mp->mp_ptrs[indx]; -#if MDB_DEBUG - { - MDB_val k2; - char kbuf2[DKBUF_MAXKEYSIZE*2+1]; - k2.mv_data = NODEKEY(node); - k2.mv_size = node->mn_ksize; - DPRINTF(("update key %u (ofs %u) [%s] to [%s] on page %"Z"u", - indx, ptr, - mdb_dkey(&k2, kbuf2), - DKEY(key), - mp->mp_pgno)); - } -#endif - - /* Sizes must be 2-byte aligned. */ - ksize = EVEN(key->mv_size); - oksize = EVEN(node->mn_ksize); - delta = ksize - oksize; - - /* Shift node contents if EVEN(key length) changed. */ - if (delta) { - if (delta > 0 && SIZELEFT(mp) < delta) { - pgno_t pgno; - /* not enough space left, do a delete and split */ - DPRINTF(("Not enough room, delta = %d, splitting...", delta)); - pgno = NODEPGNO(node); - mdb_node_del(mc, 0); - return mdb_page_split(mc, key, NULL, pgno, MDB_SPLIT_REPLACE); - } - - numkeys = NUMKEYS(mp); - for (i = 0; i < numkeys; i++) { - if (mp->mp_ptrs[i] <= ptr) - mp->mp_ptrs[i] -= delta; - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - len = ptr - mp->mp_upper + NODESIZE; - memmove(base - delta, base, len); - mp->mp_upper -= delta; - - node = NODEPTR(mp, indx); - } - - /* But even if no shift was needed, update ksize */ - if (node->mn_ksize != key->mv_size) - node->mn_ksize = key->mv_size; - - if (key->mv_size) - memcpy(NODEKEY(node), key->mv_data, key->mv_size); - - return MDB_SUCCESS; -} - -static void -mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst); - -/** Move a node from csrc to cdst. - */ -static int -mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst) -{ - MDB_node *srcnode; - MDB_val key, data; - pgno_t srcpg; - MDB_cursor mn; - int rc; - unsigned short flags; - - DKBUF; - - /* Mark src and dst as dirty. 
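mdb_cursor_count() above is only valid once the cursor is positioned, and for a node without F_DUPDATA it simply reports 1; otherwise it returns md_entries of the duplicate sub-database. A short usage sketch (cur is assumed to be an open cursor on a MDB_DUPSORT database):

/* Hedged sketch: duplicate count under the first key. */
#include <lmdb.h>

static int count_first_key(MDB_cursor *cur, size_t *ndups)
{
    MDB_val key, data;
    int rc = mdb_cursor_get(cur, &key, &data, MDB_FIRST);
    if (rc == MDB_SUCCESS)
        rc = mdb_cursor_count(cur, ndups);  /* 1 unless duplicates */
    return rc;
}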
*/ - if ((rc = mdb_page_touch(csrc)) || - (rc = mdb_page_touch(cdst))) - return rc; - - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top], key.mv_size); - data.mv_size = 0; - data.mv_data = NULL; - srcpg = 0; - flags = 0; - } else { - srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]); - mdb_cassert(csrc, !((size_t)srcnode & 1)); - srcpg = NODEPGNO(srcnode); - flags = srcnode->mn_flags; - if (csrc->mc_ki[csrc->mc_top] == 0 && IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { - unsigned int snum = csrc->mc_snum; - MDB_node *s2; - /* must find the lowest key below src */ - rc = mdb_page_search_lowest(csrc); - if (rc) - return rc; - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); - } else { - s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); - key.mv_size = NODEKSZ(s2); - key.mv_data = NODEKEY(s2); - } - csrc->mc_snum = snum--; - csrc->mc_top = snum; - } else { - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - data.mv_size = NODEDSZ(srcnode); - data.mv_data = NODEDATA(srcnode); - } - if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) { - unsigned int snum = cdst->mc_snum; - MDB_node *s2; - MDB_val bkey; - /* must find the lowest key below dst */ - mdb_cursor_copy(cdst, &mn); - rc = mdb_page_search_lowest(&mn); - if (rc) - return rc; - if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { - bkey.mv_size = mn.mc_db->md_pad; - bkey.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.mv_size); - } else { - s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); - bkey.mv_size = NODEKSZ(s2); - bkey.mv_data = NODEKEY(s2); - } - mn.mc_snum = snum--; - mn.mc_top = snum; - mn.mc_ki[snum] = 0; - rc = mdb_update_key(&mn, &bkey); - if (rc) - return rc; - } - - DPRINTF(("moving %s node %u [%s] on page %"Z"u to node %u on page %"Z"u", - IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch", - csrc->mc_ki[csrc->mc_top], - DKEY(&key), - csrc->mc_pg[csrc->mc_top]->mp_pgno, - cdst->mc_ki[cdst->mc_top], cdst->mc_pg[cdst->mc_top]->mp_pgno)); - - /* Add the node to the destination page. - */ - rc = mdb_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags); - if (rc != MDB_SUCCESS) - return rc; - - /* Delete the node from the source page. - */ - mdb_node_del(csrc, key.mv_size); - - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = csrc->mc_dbi; - MDB_page *mp = csrc->mc_pg[csrc->mc_top]; - - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == csrc) continue; - if (m3->mc_pg[csrc->mc_top] == mp && m3->mc_ki[csrc->mc_top] == - csrc->mc_ki[csrc->mc_top]) { - m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; - m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; - } - } - } - - /* Update the parent separators. 
- */ - if (csrc->mc_ki[csrc->mc_top] == 0) { - if (csrc->mc_ki[csrc->mc_top-1] != 0) { - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); - } else { - srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - DPRINTF(("update separator for source page %"Z"u to [%s]", - csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key))); - mdb_cursor_copy(csrc, &mn); - mn.mc_snum--; - mn.mc_top--; - if ((rc = mdb_update_key(&mn, &key)) != MDB_SUCCESS) - return rc; - } - if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { - MDB_val nullkey; - indx_t ix = csrc->mc_ki[csrc->mc_top]; - nullkey.mv_size = 0; - csrc->mc_ki[csrc->mc_top] = 0; - rc = mdb_update_key(csrc, &nullkey); - csrc->mc_ki[csrc->mc_top] = ix; - mdb_cassert(csrc, rc == MDB_SUCCESS); - } - } - - if (cdst->mc_ki[cdst->mc_top] == 0) { - if (cdst->mc_ki[cdst->mc_top-1] != 0) { - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_data = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.mv_size); - } else { - srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0); - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - DPRINTF(("update separator for destination page %"Z"u to [%s]", - cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key))); - mdb_cursor_copy(cdst, &mn); - mn.mc_snum--; - mn.mc_top--; - if ((rc = mdb_update_key(&mn, &key)) != MDB_SUCCESS) - return rc; - } - if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) { - MDB_val nullkey; - indx_t ix = cdst->mc_ki[cdst->mc_top]; - nullkey.mv_size = 0; - cdst->mc_ki[cdst->mc_top] = 0; - rc = mdb_update_key(cdst, &nullkey); - cdst->mc_ki[cdst->mc_top] = ix; - mdb_cassert(csrc, rc == MDB_SUCCESS); - } - } - - return MDB_SUCCESS; -} - -/** Merge one page into another. - * The nodes from the page pointed to by \b csrc will - * be copied to the page pointed to by \b cdst and then - * the \b csrc page will be freed. - * @param[in] csrc Cursor pointing to the source page. - * @param[in] cdst Cursor pointing to the destination page. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst) -{ - MDB_page *psrc, *pdst; - MDB_node *srcnode; - MDB_val key, data; - unsigned nkeys; - int rc; - indx_t i, j; - - psrc = csrc->mc_pg[csrc->mc_top]; - pdst = cdst->mc_pg[cdst->mc_top]; - - DPRINTF(("merging page %"Z"u into %"Z"u", psrc->mp_pgno, pdst->mp_pgno)); - - mdb_cassert(csrc, csrc->mc_snum > 1); /* can't merge root page */ - mdb_cassert(csrc, cdst->mc_snum > 1); - - /* Mark dst as dirty. */ - if ((rc = mdb_page_touch(cdst))) - return rc; - - /* Move all nodes from src to dst. 
- */ - j = nkeys = NUMKEYS(pdst); - if (IS_LEAF2(psrc)) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = METADATA(psrc); - for (i = 0; i < NUMKEYS(psrc); i++, j++) { - rc = mdb_node_add(cdst, j, &key, NULL, 0, 0); - if (rc != MDB_SUCCESS) - return rc; - key.mv_data = (char *)key.mv_data + key.mv_size; - } - } else { - for (i = 0; i < NUMKEYS(psrc); i++, j++) { - srcnode = NODEPTR(psrc, i); - if (i == 0 && IS_BRANCH(psrc)) { - MDB_cursor mn; - MDB_node *s2; - mdb_cursor_copy(csrc, &mn); - /* must find the lowest key below src */ - rc = mdb_page_search_lowest(&mn); - if (rc) - return rc; - if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { - key.mv_size = mn.mc_db->md_pad; - key.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.mv_size); - } else { - s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); - key.mv_size = NODEKSZ(s2); - key.mv_data = NODEKEY(s2); - } - } else { - key.mv_size = srcnode->mn_ksize; - key.mv_data = NODEKEY(srcnode); - } - - data.mv_size = NODEDSZ(srcnode); - data.mv_data = NODEDATA(srcnode); - rc = mdb_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), srcnode->mn_flags); - if (rc != MDB_SUCCESS) - return rc; - } - } - - DPRINTF(("dst page %"Z"u now has %u keys (%.1f%% filled)", - pdst->mp_pgno, NUMKEYS(pdst), - (float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10)); - - /* Unlink the src page from parent and add to free list. - */ - csrc->mc_top--; - mdb_node_del(csrc, 0); - if (csrc->mc_ki[csrc->mc_top] == 0) { - key.mv_size = 0; - rc = mdb_update_key(csrc, &key); - if (rc) { - csrc->mc_top++; - return rc; - } - } - csrc->mc_top++; - - psrc = csrc->mc_pg[csrc->mc_top]; - /* If not operating on FreeDB, allow this page to be reused - * in this txn. Otherwise just add to free list. - */ - rc = mdb_page_loose(csrc, psrc); - if (rc) - return rc; - if (IS_LEAF(psrc)) - csrc->mc_db->md_leaf_pages--; - else - csrc->mc_db->md_branch_pages--; - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = csrc->mc_dbi; - - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == csrc) continue; - if (m3->mc_snum < csrc->mc_snum) continue; - if (m3->mc_pg[csrc->mc_top] == psrc) { - m3->mc_pg[csrc->mc_top] = pdst; - m3->mc_ki[csrc->mc_top] += nkeys; - } - } - } - { - unsigned int snum = cdst->mc_snum; - uint16_t depth = cdst->mc_db->md_depth; - mdb_cursor_pop(cdst); - rc = mdb_rebalance(cdst); - /* Did the tree shrink? */ - if (depth > cdst->mc_db->md_depth) - snum--; - cdst->mc_snum = snum; - cdst->mc_top = snum-1; - } - return rc; -} - -/** Copy the contents of a cursor. - * @param[in] csrc The cursor to copy from. - * @param[out] cdst The cursor to copy to. - */ -static void -mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst) -{ - unsigned int i; - - cdst->mc_txn = csrc->mc_txn; - cdst->mc_dbi = csrc->mc_dbi; - cdst->mc_db = csrc->mc_db; - cdst->mc_dbx = csrc->mc_dbx; - cdst->mc_snum = csrc->mc_snum; - cdst->mc_top = csrc->mc_top; - cdst->mc_flags = csrc->mc_flags; - - for (i=0; i<csrc->mc_snum; i++) { - cdst->mc_pg[i] = csrc->mc_pg[i]; - cdst->mc_ki[i] = csrc->mc_ki[i]; - } -} - -/** Rebalance the tree after a delete operation. - * @param[in] mc Cursor pointing to the page where rebalancing - * should begin. - * @return 0 on success, non-zero on failure.
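Note that PAGEFILL() reports fill in tenths of a percent, which is why the debug output divides by 10. With FILL_THRESHOLD at its stock value of 250 (an assumption based on the usual LMDB sources; the macro is defined earlier in this file and is not visible in this hunk), a page is left alone once it is at least 25% full and still holds the minimum key count. The trigger condition, as a sketch:

/* Hedged sketch: when mdb_rebalance() actually has work to do,
 * with FILL_THRESHOLD assumed to be 250. */
static int needs_rebalance(unsigned fill_tenths, unsigned nkeys, int is_branch)
{
    unsigned minkeys = 1u + (is_branch != 0);  /* branch pages keep >= 2 */
    return fill_tenths < 250u || nkeys < minkeys;
}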
- */ -static int -mdb_rebalance(MDB_cursor *mc) -{ - MDB_node *node; - int rc; - unsigned int ptop, minkeys; - MDB_cursor mn; - indx_t oldki; - - minkeys = 1 + (IS_BRANCH(mc->mc_pg[mc->mc_top])); - DPRINTF(("rebalancing %s page %"Z"u (has %u keys, %.1f%% full)", - IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch", - mdb_dbg_pgno(mc->mc_pg[mc->mc_top]), NUMKEYS(mc->mc_pg[mc->mc_top]), - (float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10)); - - if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= FILL_THRESHOLD && - NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) { - DPRINTF(("no need to rebalance page %"Z"u, above fill threshold", - mdb_dbg_pgno(mc->mc_pg[mc->mc_top]))); - return MDB_SUCCESS; - } - - if (mc->mc_snum < 2) { - MDB_page *mp = mc->mc_pg[0]; - if (IS_SUBP(mp)) { - DPUTS("Can't rebalance a subpage, ignoring"); - return MDB_SUCCESS; - } - if (NUMKEYS(mp) == 0) { - DPUTS("tree is completely empty"); - mc->mc_db->md_root = P_INVALID; - mc->mc_db->md_depth = 0; - mc->mc_db->md_leaf_pages = 0; - rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno); - if (rc) - return rc; - /* Adjust cursors pointing to mp */ - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_flags &= ~C_INITIALIZED; - { - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3->mc_snum < mc->mc_snum) continue; - if (m3->mc_pg[0] == mp) { - m3->mc_snum = 0; - m3->mc_top = 0; - m3->mc_flags &= ~C_INITIALIZED; - } - } - } - } else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) { - int i; - DPUTS("collapsing root page!"); - rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno); - if (rc) - return rc; - mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0)); - rc = mdb_page_get(mc->mc_txn,mc->mc_db->md_root,&mc->mc_pg[0],NULL); - if (rc) - return rc; - mc->mc_db->md_depth--; - mc->mc_db->md_branch_pages--; - mc->mc_ki[0] = mc->mc_ki[1]; - for (i = 1; i<mc->mc_db->md_depth; i++) { - mc->mc_pg[i] = mc->mc_pg[i+1]; - mc->mc_ki[i] = mc->mc_ki[i+1]; - } - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc || m3->mc_snum < mc->mc_snum) continue; - if (m3->mc_pg[0] == mp) { - m3->mc_snum--; - m3->mc_top--; - for (i=0; i<m3->mc_snum; i++) { - m3->mc_pg[i] = m3->mc_pg[i+1]; - m3->mc_ki[i] = m3->mc_ki[i+1]; - } - } - } - } - } else - DPUTS("root page doesn't need rebalancing"); - return MDB_SUCCESS; - } - - /* The parent (branch page) must have at least 2 pointers, - * otherwise the tree is invalid. - */ - ptop = mc->mc_top-1; - mdb_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1); - - /* Leaf page fill factor is below the threshold. - * Try to move keys from left or right neighbor, or - * merge with a neighbor page. - */ - - /* Find neighbors. - */ - mdb_cursor_copy(mc, &mn); - mn.mc_xcursor = NULL; - - oldki = mc->mc_ki[mc->mc_top]; - if (mc->mc_ki[ptop] == 0) { - /* We're the leftmost leaf in our parent. - */ - DPUTS("reading right neighbor"); - mn.mc_ki[ptop]++; - node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); - rc = mdb_page_get(mc->mc_txn,NODEPGNO(node),&mn.mc_pg[mn.mc_top],NULL); - if (rc) - return rc; - mn.mc_ki[mn.mc_top] = 0; - mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]); - } else { - /* There is at least one neighbor to the left.
- */ - DPUTS("reading left neighbor"); - mn.mc_ki[ptop]--; - node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); - rc = mdb_page_get(mc->mc_txn,NODEPGNO(node),&mn.mc_pg[mn.mc_top],NULL); - if (rc) - return rc; - mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1; - mc->mc_ki[mc->mc_top] = 0; - } - - DPRINTF(("found neighbor page %"Z"u (%u keys, %.1f%% full)", - mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]), - (float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10)); - - /* If the neighbor page is above threshold and has enough keys, - * move one key from it. Otherwise we should try to merge them. - * (A branch page must never have less than 2 keys.) - */ - minkeys = 1 + (IS_BRANCH(mn.mc_pg[mn.mc_top])); - if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= FILL_THRESHOLD && NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) { - rc = mdb_node_move(&mn, mc); - if (mc->mc_ki[ptop]) { - oldki++; - } - } else { - if (mc->mc_ki[ptop] == 0) { - rc = mdb_page_merge(&mn, mc); - } else { - oldki += NUMKEYS(mn.mc_pg[mn.mc_top]); - mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1; - rc = mdb_page_merge(mc, &mn); - mdb_cursor_copy(&mn, mc); - } - mc->mc_flags &= ~C_EOF; - } - mc->mc_ki[mc->mc_top] = oldki; - return rc; -} - -/** Complete a delete operation started by #mdb_cursor_del(). */ -static int -mdb_cursor_del0(MDB_cursor *mc) -{ - int rc; - MDB_page *mp; - indx_t ki; - unsigned int nkeys; - - ki = mc->mc_ki[mc->mc_top]; - mdb_node_del(mc, mc->mc_db->md_pad); - mc->mc_db->md_entries--; - rc = mdb_rebalance(mc); - - if (rc == MDB_SUCCESS) { - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - mp = mc->mc_pg[mc->mc_top]; - nkeys = NUMKEYS(mp); - - /* if mc points past last node in page, find next sibling */ - if (mc->mc_ki[mc->mc_top] >= nkeys) { - rc = mdb_cursor_sibling(mc, 1); - if (rc == MDB_NOTFOUND) { - mc->mc_flags |= C_EOF; - rc = MDB_SUCCESS; - } - } - - /* Adjust other cursors pointing to mp */ - for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2=m2->mc_next) { - m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; - if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (m3 == mc || m3->mc_snum < mc->mc_snum) - continue; - if (m3->mc_pg[mc->mc_top] == mp) { - if (m3->mc_ki[mc->mc_top] >= ki) { - m3->mc_flags |= C_DEL; - if (m3->mc_ki[mc->mc_top] > ki) - m3->mc_ki[mc->mc_top]--; - else if (mc->mc_db->md_flags & MDB_DUPSORT) - m3->mc_xcursor->mx_cursor.mc_flags |= C_EOF; - } - if (m3->mc_ki[mc->mc_top] >= nkeys) { - rc = mdb_cursor_sibling(m3, 1); - if (rc == MDB_NOTFOUND) { - m3->mc_flags |= C_EOF; - rc = MDB_SUCCESS; - } - } - } - } - mc->mc_flags |= C_DEL; - } - - if (rc) - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_del(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data) -{ - if (!key || dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi)) - return EINVAL; - - if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_ERROR)) - return (txn->mt_flags & MDB_TXN_RDONLY) ? 
EACCES : MDB_BAD_TXN; - - if (!F_ISSET(txn->mt_dbs[dbi].md_flags, MDB_DUPSORT)) { - /* must ignore any data */ - data = NULL; - } - - return mdb_del0(txn, dbi, key, data, 0); -} - -static int -mdb_del0(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data, unsigned flags) -{ - MDB_cursor mc; - MDB_xcursor mx; - MDB_cursor_op op; - MDB_val rdata, *xdata; - int rc, exact = 0; - DKBUF; - - DPRINTF(("====> delete db %u key [%s]", dbi, DKEY(key))); - - mdb_cursor_init(&mc, txn, dbi, &mx); - - if (data) { - op = MDB_GET_BOTH; - rdata = *data; - xdata = &rdata; - } else { - op = MDB_SET; - xdata = NULL; - flags |= MDB_NODUPDATA; - } - rc = mdb_cursor_set(&mc, key, xdata, op, &exact); - if (rc == 0) { - /* let mdb_page_split know about this cursor if needed: - * delete will trigger a rebalance; if it needs to move - * a node from one page to another, it will have to - * update the parent's separator key(s). If the new sepkey - * is larger than the current one, the parent page may - * run out of space, triggering a split. We need this - * cursor to be consistent until the end of the rebalance. - */ - mc.mc_flags |= C_UNTRACK; - mc.mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = &mc; - rc = mdb_cursor_del(&mc, flags); - txn->mt_cursors[dbi] = mc.mc_next; - } - return rc; -} - -/** Split a page and insert a new node. - * @param[in,out] mc Cursor pointing to the page and desired insertion index. - * The cursor will be updated to point to the actual page and index where - * the node got inserted after the split. - * @param[in] newkey The key for the newly inserted node. - * @param[in] newdata The data for the newly inserted node. - * @param[in] newpgno The page number, if the new node is a branch node. - * @param[in] nflags The #NODE_ADD_FLAGS for the new node. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, pgno_t newpgno, - unsigned int nflags) -{ - unsigned int flags; - int rc = MDB_SUCCESS, new_root = 0, did_split = 0; - indx_t newindx; - pgno_t pgno = 0; - int i, j, split_indx, nkeys, pmax; - MDB_env *env = mc->mc_txn->mt_env; - MDB_node *node; - MDB_val sepkey, rkey, xdata, *rdata = &xdata; - MDB_page *copy = NULL; - MDB_page *mp, *rp, *pp; - int ptop; - MDB_cursor mn; - DKBUF; - - mp = mc->mc_pg[mc->mc_top]; - newindx = mc->mc_ki[mc->mc_top]; - nkeys = NUMKEYS(mp); - - DPRINTF(("-----> splitting %s page %"Z"u and adding [%s] at index %i/%i", - IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno, - DKEY(newkey), mc->mc_ki[mc->mc_top], nkeys)); - - /* Create a right sibling. */ - if ((rc = mdb_page_new(mc, mp->mp_flags, 1, &rp))) - return rc; - DPRINTF(("new right sibling: page %"Z"u", rp->mp_pgno)); - - if (mc->mc_snum < 2) { - if ((rc = mdb_page_new(mc, P_BRANCH, 1, &pp))) - goto done; - /* shift current top to make room for new parent */ - mc->mc_pg[1] = mc->mc_pg[0]; - mc->mc_ki[1] = mc->mc_ki[0]; - mc->mc_pg[0] = pp; - mc->mc_ki[0] = 0; - mc->mc_db->md_root = pp->mp_pgno; - DPRINTF(("root split! new root = %"Z"u", pp->mp_pgno)); - mc->mc_db->md_depth++; - new_root = 1; - - /* Add left (implicit) pointer. 
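From the caller's side, the data argument that mdb_del() passes through to mdb_del0() matters only on MDB_DUPSORT databases, where it selects a single duplicate; a NULL data pointer deletes the key together with all of its duplicates. A minimal usage sketch (txn and dbi assumed valid, error handling elided):

/* Hedged sketch: deleting one duplicate vs. the whole key. */
#include <lmdb.h>

static void del_examples(MDB_txn *txn, MDB_dbi dbi)
{
    MDB_val key = { 3, "foo" };
    MDB_val one = { 4, "bar1" };
    (void)mdb_del(txn, dbi, &key, &one);  /* drop just duplicate "bar1" */
    (void)mdb_del(txn, dbi, &key, NULL);  /* drop key and all duplicates */
}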
*/ - if ((rc = mdb_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != MDB_SUCCESS) { - /* undo the pre-push */ - mc->mc_pg[0] = mc->mc_pg[1]; - mc->mc_ki[0] = mc->mc_ki[1]; - mc->mc_db->md_root = mp->mp_pgno; - mc->mc_db->md_depth--; - goto done; - } - mc->mc_snum = 2; - mc->mc_top = 1; - ptop = 0; - } else { - ptop = mc->mc_top-1; - DPRINTF(("parent branch page is %"Z"u", mc->mc_pg[ptop]->mp_pgno)); - } - - mc->mc_flags |= C_SPLITTING; - mdb_cursor_copy(mc, &mn); - mn.mc_pg[mn.mc_top] = rp; - mn.mc_ki[ptop] = mc->mc_ki[ptop]+1; - - if (nflags & MDB_APPEND) { - mn.mc_ki[mn.mc_top] = 0; - sepkey = *newkey; - split_indx = newindx; - nkeys = 0; - } else { - - split_indx = (nkeys+1) / 2; - - if (IS_LEAF2(rp)) { - char *split, *ins; - int x; - unsigned int lsize, rsize, ksize; - /* Move half of the keys to the right sibling */ - x = mc->mc_ki[mc->mc_top] - split_indx; - ksize = mc->mc_db->md_pad; - split = LEAF2KEY(mp, split_indx, ksize); - rsize = (nkeys - split_indx) * ksize; - lsize = (nkeys - split_indx) * sizeof(indx_t); - mp->mp_lower -= lsize; - rp->mp_lower += lsize; - mp->mp_upper += rsize - lsize; - rp->mp_upper -= rsize - lsize; - sepkey.mv_size = ksize; - if (newindx == split_indx) { - sepkey.mv_data = newkey->mv_data; - } else { - sepkey.mv_data = split; - } - if (x<0) { - ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize); - memcpy(rp->mp_ptrs, split, rsize); - sepkey.mv_data = rp->mp_ptrs; - memmove(ins+ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize); - memcpy(ins, newkey->mv_data, ksize); - mp->mp_lower += sizeof(indx_t); - mp->mp_upper -= ksize - sizeof(indx_t); - } else { - if (x) - memcpy(rp->mp_ptrs, split, x * ksize); - ins = LEAF2KEY(rp, x, ksize); - memcpy(ins, newkey->mv_data, ksize); - memcpy(ins+ksize, split + x * ksize, rsize - x * ksize); - rp->mp_lower += sizeof(indx_t); - rp->mp_upper -= ksize - sizeof(indx_t); - mc->mc_ki[mc->mc_top] = x; - mc->mc_pg[mc->mc_top] = rp; - } - } else { - int psize, nsize, k; - /* Maximum free space in an empty page */ - pmax = env->me_psize - PAGEHDRSZ; - if (IS_LEAF(mp)) - nsize = mdb_leaf_size(env, newkey, newdata); - else - nsize = mdb_branch_size(env, newkey); - nsize = EVEN(nsize); - - /* grab a page to hold a temporary copy */ - copy = mdb_page_malloc(mc->mc_txn, 1); - if (copy == NULL) { - rc = ENOMEM; - goto done; - } - copy->mp_pgno = mp->mp_pgno; - copy->mp_flags = mp->mp_flags; - copy->mp_lower = (PAGEHDRSZ-PAGEBASE); - copy->mp_upper = env->me_psize - PAGEBASE; - - /* prepare to insert */ - for (i=0, j=0; i<nkeys; i++) { - if (i == newindx) { - copy->mp_ptrs[j++] = 0; - } - copy->mp_ptrs[j++] = mp->mp_ptrs[i]; - } - - /* When items are relatively large the split point needs - * to be checked, because being off-by-one will make the - * difference between success or failure in mdb_node_add. - * - * It's also relevant if a page happens to be laid out - * such that one half of its nodes are all "small" and - * the other half of its nodes are "large." If the new - * item is also "large" and falls on the half with - * "large" nodes, it also may not fit. - * - * As a final tweak, if the new item goes on the last - * spot on the page (and thus, onto the new page), bias - * the split so the new page is emptier than the old page. - * This yields better packing during sequential inserts. - */ - if (nkeys < 20 || nsize > pmax/16 || newindx >= nkeys) { - /* Find split point */ - psize = 0; - if (newindx <= split_indx || newindx >= nkeys) { - i = 0; j = 1; - k = newindx >= nkeys ? 
nkeys : split_indx+2; - } else { - i = nkeys; j = -1; - k = split_indx-1; - } - for (; i!=k; i+=j) { - if (i == newindx) { - psize += nsize; - node = NULL; - } else { - node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE); - psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t); - if (IS_LEAF(mp)) { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - psize += sizeof(pgno_t); - else - psize += NODEDSZ(node); - } - psize = EVEN(psize); - } - if (psize > pmax || i == k-j) { - split_indx = i + (j<0); - break; - } - } - } - if (split_indx == newindx) { - sepkey.mv_size = newkey->mv_size; - sepkey.mv_data = newkey->mv_data; - } else { - node = (MDB_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEBASE); - sepkey.mv_size = node->mn_ksize; - sepkey.mv_data = NODEKEY(node); - } - } - } - - DPRINTF(("separator is %d [%s]", split_indx, DKEY(&sepkey))); - - /* Copy separator key to the parent. - */ - if (SIZELEFT(mn.mc_pg[ptop]) < mdb_branch_size(env, &sepkey)) { - mn.mc_snum--; - mn.mc_top--; - did_split = 1; - rc = mdb_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0); - if (rc) - goto done; - - /* root split? */ - if (mn.mc_snum == mc->mc_snum) { - mc->mc_pg[mc->mc_snum] = mc->mc_pg[mc->mc_top]; - mc->mc_ki[mc->mc_snum] = mc->mc_ki[mc->mc_top]; - mc->mc_pg[mc->mc_top] = mc->mc_pg[ptop]; - mc->mc_ki[mc->mc_top] = mc->mc_ki[ptop]; - mc->mc_snum++; - mc->mc_top++; - ptop++; - } - /* Right page might now have changed parent. - * Check if left page also changed parent. - */ - if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && - mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { - for (i=0; i<ptop; i++) { - mc->mc_pg[i] = mn.mc_pg[i]; - mc->mc_ki[i] = mn.mc_ki[i]; - } - mc->mc_pg[ptop] = mn.mc_pg[ptop]; - if (mn.mc_ki[ptop]) { - mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1; - } else { - /* find right page's left sibling */ - mc->mc_ki[ptop] = mn.mc_ki[ptop]; - mdb_cursor_sibling(mc, 0); - } - } - } else { - mn.mc_top--; - rc = mdb_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0); - mn.mc_top++; - } - mc->mc_flags ^= C_SPLITTING; - if (rc != MDB_SUCCESS) { - goto done; - } - if (nflags & MDB_APPEND) { - mc->mc_pg[mc->mc_top] = rp; - mc->mc_ki[mc->mc_top] = 0; - rc = mdb_node_add(mc, 0, newkey, newdata, newpgno, nflags); - if (rc) - goto done; - for (i=0; i<mc->mc_top; i++) - mc->mc_ki[i] = mn.mc_ki[i]; - } else if (!IS_LEAF2(mp)) { - /* Move nodes */ - mc->mc_pg[mc->mc_top] = rp; - i = split_indx; - j = 0; - do { - if (i == newindx) { - rkey.mv_data = newkey->mv_data; - rkey.mv_size = newkey->mv_size; - if (IS_LEAF(mp)) { - rdata = newdata; - } else - pgno = newpgno; - flags = nflags; - /* Update index for the new key. */ - mc->mc_ki[mc->mc_top] = j; - } else { - node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE); - rkey.mv_data = NODEKEY(node); - rkey.mv_size = node->mn_ksize; - if (IS_LEAF(mp)) { - xdata.mv_data = NODEDATA(node); - xdata.mv_size = NODEDSZ(node); - rdata = &xdata; - } else - pgno = NODEPGNO(node); - flags = node->mn_flags; - } - - if (!IS_LEAF(mp) && j == 0) { - /* First branch index doesn't need key data. 
*/ - rkey.mv_size = 0; - } - - rc = mdb_node_add(mc, j, &rkey, rdata, pgno, flags); - if (rc) - goto done; - if (i == nkeys) { - i = 0; - j = 0; - mc->mc_pg[mc->mc_top] = copy; - } else { - i++; - j++; - } - } while (i != split_indx); - - nkeys = NUMKEYS(copy); - for (i=0; i<nkeys; i++) - mp->mp_ptrs[i] = copy->mp_ptrs[i]; - mp->mp_lower = copy->mp_lower; - mp->mp_upper = copy->mp_upper; - memcpy(NODEPTR(mp, nkeys-1), NODEPTR(copy, nkeys-1), - env->me_psize - copy->mp_upper - PAGEBASE); - - /* reset back to original page */ - if (newindx < split_indx) { - mc->mc_pg[mc->mc_top] = mp; - if (nflags & MDB_RESERVE) { - node = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (!(node->mn_flags & F_BIGDATA)) - newdata->mv_data = NODEDATA(node); - } - } else { - mc->mc_pg[mc->mc_top] = rp; - mc->mc_ki[ptop]++; - /* Make sure mc_ki is still valid. - */ - if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && - mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { - for (i=0; i<=ptop; i++) { - mc->mc_pg[i] = mn.mc_pg[i]; - mc->mc_ki[i] = mn.mc_ki[i]; - } - } - } - } - - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - int fixup = NUMKEYS(mp); - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc) - continue; - if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (m3->mc_flags & C_SPLITTING) - continue; - if (new_root) { - int k; - /* root split */ - for (k=m3->mc_top; k>=0; k--) { - m3->mc_ki[k+1] = m3->mc_ki[k]; - m3->mc_pg[k+1] = m3->mc_pg[k]; - } - if (m3->mc_ki[0] >= split_indx) { - m3->mc_ki[0] = 1; - } else { - m3->mc_ki[0] = 0; - } - m3->mc_pg[0] = mc->mc_pg[0]; - m3->mc_snum++; - m3->mc_top++; - } - if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) { - if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDB_SPLIT_REPLACE)) - m3->mc_ki[mc->mc_top]++; - if (m3->mc_ki[mc->mc_top] >= fixup) { - m3->mc_pg[mc->mc_top] = rp; - m3->mc_ki[mc->mc_top] -= fixup; - m3->mc_ki[ptop] = mn.mc_ki[ptop]; - } - } else if (!did_split && m3->mc_top >= ptop && m3->mc_pg[ptop] == mc->mc_pg[ptop] && - m3->mc_ki[ptop] >= mc->mc_ki[ptop]) { - m3->mc_ki[ptop]++; - } - } - } - DPRINTF(("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp))); - -done: - if (copy) /* tmp page */ - mdb_page_free(env, copy); - if (rc) - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_put(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data, unsigned int flags) -{ - MDB_cursor mc; - MDB_xcursor mx; - - if (!key || !data || dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi)) - return EINVAL; - - if ((flags & (MDB_NOOVERWRITE|MDB_NODUPDATA|MDB_RESERVE|MDB_APPEND|MDB_APPENDDUP)) != flags) - return EINVAL; - - mdb_cursor_init(&mc, txn, dbi, &mx); - return mdb_cursor_put(&mc, key, data, flags); -} - -#ifndef MDB_WBUF -#define MDB_WBUF (1024*1024) -#endif - - /** State needed for a compacting copy. */ -typedef struct mdb_copy { - pthread_mutex_t mc_mutex; - pthread_cond_t mc_cond; - char *mc_wbuf[2]; - char *mc_over[2]; - MDB_env *mc_env; - MDB_txn *mc_txn; - int mc_wlen[2]; - int mc_olen[2]; - pgno_t mc_next_pgno; - HANDLE mc_fd; - int mc_status; - volatile int mc_new; - int mc_toggle; - -} mdb_copy; - - /** Dedicated writer thread for compacting copy. 
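mdb_put() above is a thin wrapper that runs mdb_cursor_put() on a stack-allocated cursor; the behavior callers notice comes from the flag set it validates. A sketch of two common patterns, MDB_NOOVERWRITE and MDB_RESERVE (the reserved buffer is filled in after the call, before the transaction commits):

/* Hedged sketch: common mdb_put() flag usage. */
#include <string.h>
#include <lmdb.h>

static int put_examples(MDB_txn *txn, MDB_dbi dbi)
{
    MDB_val key = { 2, "id" }, data;
    int rc;

    data.mv_size = 5;
    data.mv_data = "hello";
    rc = mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE);
    if (rc == MDB_KEYEXIST)
        return rc;                 /* key was already present */

    data.mv_size = 128;            /* reserve space, fill it in later */
    data.mv_data = NULL;
    rc = mdb_put(txn, dbi, &key, &data, MDB_RESERVE);
    if (rc == MDB_SUCCESS)
        memset(data.mv_data, 0, data.mv_size);  /* write into the page */
    return rc;
}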
*/ -static THREAD_RET ESECT -mdb_env_copythr(void *arg) -{ - mdb_copy *my = arg; - char *ptr; - int toggle = 0, wsize, rc; -#ifdef _WIN32 - DWORD len; -#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) -#else - int len; -#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) -#endif - - pthread_mutex_lock(&my->mc_mutex); - my->mc_new = 0; - pthread_cond_signal(&my->mc_cond); - for(;;) { - while (!my->mc_new) - pthread_cond_wait(&my->mc_cond, &my->mc_mutex); - if (my->mc_new < 0) { - my->mc_new = 0; - break; - } - my->mc_new = 0; - wsize = my->mc_wlen[toggle]; - ptr = my->mc_wbuf[toggle]; -again: - while (wsize > 0) { - DO_WRITE(rc, my->mc_fd, ptr, wsize, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - wsize -= len; - continue; - } else { - rc = EIO; - break; - } - } - if (rc) { - my->mc_status = rc; - break; - } - /* If there's an overflow page tail, write it too */ - if (my->mc_olen[toggle]) { - wsize = my->mc_olen[toggle]; - ptr = my->mc_over[toggle]; - my->mc_olen[toggle] = 0; - goto again; - } - my->mc_wlen[toggle] = 0; - toggle ^= 1; - pthread_cond_signal(&my->mc_cond); - } - pthread_cond_signal(&my->mc_cond); - pthread_mutex_unlock(&my->mc_mutex); - return (THREAD_RET)0; -#undef DO_WRITE -} - - /** Tell the writer thread there's a buffer ready to write */ -static int ESECT -mdb_env_cthr_toggle(mdb_copy *my, int st) -{ - int toggle = my->mc_toggle ^ 1; - pthread_mutex_lock(&my->mc_mutex); - if (my->mc_status) { - pthread_mutex_unlock(&my->mc_mutex); - return my->mc_status; - } - while (my->mc_new == 1) - pthread_cond_wait(&my->mc_cond, &my->mc_mutex); - my->mc_new = st; - my->mc_toggle = toggle; - pthread_cond_signal(&my->mc_cond); - pthread_mutex_unlock(&my->mc_mutex); - return 0; -} - - /** Depth-first tree traversal for compacting copy. */ -static int ESECT -mdb_env_cwalk(mdb_copy *my, pgno_t *pg, int flags) -{ - MDB_cursor mc; - MDB_txn *txn = my->mc_txn; - MDB_node *ni; - MDB_page *mo, *mp, *leaf; - char *buf, *ptr; - int rc, toggle; - unsigned int i; - - /* Empty DB, nothing to do */ - if (*pg == P_INVALID) - return MDB_SUCCESS; - - mc.mc_snum = 1; - mc.mc_top = 0; - mc.mc_txn = txn; - - rc = mdb_page_get(my->mc_txn, *pg, &mc.mc_pg[0], NULL); - if (rc) - return rc; - rc = mdb_page_search_root(&mc, NULL, MDB_PS_FIRST); - if (rc) - return rc; - - /* Make cursor pages writable */ - buf = ptr = malloc(my->mc_env->me_psize * mc.mc_snum); - if (buf == NULL) - return ENOMEM; - - for (i=0; i<mc.mc_snum; i++) { - mdb_page_copy((MDB_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize); - mc.mc_pg[i] = (MDB_page *)ptr; - ptr += my->mc_env->me_psize; - } - - /* This is writable space for a leaf page. Usually not needed. 
*/ - leaf = (MDB_page *)ptr; - - toggle = my->mc_toggle; - while (mc.mc_snum > 0) { - unsigned n; - mp = mc.mc_pg[mc.mc_top]; - n = NUMKEYS(mp); - - if (IS_LEAF(mp)) { - if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) { - for (i=0; i<n; i++) { - ni = NODEPTR(mp, i); - if (ni->mn_flags & F_BIGDATA) { - MDB_page *omp; - pgno_t pg; - - /* Need writable leaf */ - if (mp != leaf) { - mc.mc_pg[mc.mc_top] = leaf; - mdb_page_copy(leaf, mp, my->mc_env->me_psize); - mp = leaf; - ni = NODEPTR(mp, i); - } - - memcpy(&pg, NODEDATA(ni), sizeof(pg)); - rc = mdb_page_get(txn, pg, &omp, NULL); - if (rc) - goto done; - if (my->mc_wlen[toggle] >= MDB_WBUF) { - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); - memcpy(mo, omp, my->mc_env->me_psize); - mo->mp_pgno = my->mc_next_pgno; - my->mc_next_pgno += omp->mp_pages; - my->mc_wlen[toggle] += my->mc_env->me_psize; - if (omp->mp_pages > 1) { - my->mc_olen[toggle] = my->mc_env->me_psize * (omp->mp_pages - 1); - my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize; - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - memcpy(NODEDATA(ni), &mo->mp_pgno, sizeof(pgno_t)); - } else if (ni->mn_flags & F_SUBDATA) { - MDB_db db; - - /* Need writable leaf */ - if (mp != leaf) { - mc.mc_pg[mc.mc_top] = leaf; - mdb_page_copy(leaf, mp, my->mc_env->me_psize); - mp = leaf; - ni = NODEPTR(mp, i); - } - - memcpy(&db, NODEDATA(ni), sizeof(db)); - my->mc_toggle = toggle; - rc = mdb_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA); - if (rc) - goto done; - toggle = my->mc_toggle; - memcpy(NODEDATA(ni), &db, sizeof(db)); - } - } - } - } else { - mc.mc_ki[mc.mc_top]++; - if (mc.mc_ki[mc.mc_top] < n) { - pgno_t pg; -again: - ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]); - pg = NODEPGNO(ni); - rc = mdb_page_get(txn, pg, &mp, NULL); - if (rc) - goto done; - mc.mc_top++; - mc.mc_snum++; - mc.mc_ki[mc.mc_top] = 0; - if (IS_BRANCH(mp)) { - /* Whenever we advance to a sibling branch page, - * we must proceed all the way down to its first leaf. - */ - mdb_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize); - goto again; - } else - mc.mc_pg[mc.mc_top] = mp; - continue; - } - } - if (my->mc_wlen[toggle] >= MDB_WBUF) { - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); - mdb_page_copy(mo, mp, my->mc_env->me_psize); - mo->mp_pgno = my->mc_next_pgno++; - my->mc_wlen[toggle] += my->mc_env->me_psize; - if (mc.mc_top) { - /* Update parent if there is one */ - ni = NODEPTR(mc.mc_pg[mc.mc_top-1], mc.mc_ki[mc.mc_top-1]); - SETPGNO(ni, mo->mp_pgno); - mdb_cursor_pop(&mc); - } else { - /* Otherwise we're done */ - *pg = mo->mp_pgno; - break; - } - } -done: - free(buf); - return rc; -} - - /** Copy environment with compaction. 
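The writer thread, the buffer-toggle handshake and mdb_env_cwalk() above together implement the MDB_CP_COMPACT path of mdb_env_copyfd2(), defined further down. From the application's point of view the whole machinery sits behind a single call; a sketch, with the destination path purely illustrative:

/* Hedged sketch: compacting backup through the public API. */
#include <stdio.h>
#include <lmdb.h>

static void backup_compact(MDB_env *env)
{
    int rc = mdb_env_copy2(env, "./backup", MDB_CP_COMPACT);
    if (rc != MDB_SUCCESS)
        fprintf(stderr, "copy failed: %s\n", mdb_strerror(rc));
}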
*/ -static int ESECT -mdb_env_copyfd1(MDB_env *env, HANDLE fd) -{ - MDB_meta *mm; - MDB_page *mp; - mdb_copy my; - MDB_txn *txn = NULL; - pthread_t thr; - int rc; - -#ifdef _WIN32 - my.mc_mutex = CreateMutex(NULL, FALSE, NULL); - my.mc_cond = CreateEvent(NULL, FALSE, FALSE, NULL); - my.mc_wbuf[0] = _aligned_malloc(MDB_WBUF*2, env->me_os_psize); - if (my.mc_wbuf[0] == NULL) - return errno; -#else - pthread_mutex_init(&my.mc_mutex, NULL); - pthread_cond_init(&my.mc_cond, NULL); -#ifdef HAVE_MEMALIGN - my.mc_wbuf[0] = memalign(env->me_os_psize, MDB_WBUF*2); - if (my.mc_wbuf[0] == NULL) - return errno; -#else - rc = posix_memalign((void **)&my.mc_wbuf[0], env->me_os_psize, MDB_WBUF*2); - if (rc) - return rc; -#endif -#endif - memset(my.mc_wbuf[0], 0, MDB_WBUF*2); - my.mc_wbuf[1] = my.mc_wbuf[0] + MDB_WBUF; - my.mc_wlen[0] = 0; - my.mc_wlen[1] = 0; - my.mc_olen[0] = 0; - my.mc_olen[1] = 0; - my.mc_next_pgno = 2; - my.mc_status = 0; - my.mc_new = 1; - my.mc_toggle = 0; - my.mc_env = env; - my.mc_fd = fd; - THREAD_CREATE(thr, mdb_env_copythr, &my); - - rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); - if (rc) - return rc; - - mp = (MDB_page *)my.mc_wbuf[0]; - memset(mp, 0, 2*env->me_psize); - mp->mp_pgno = 0; - mp->mp_flags = P_META; - mm = (MDB_meta *)METADATA(mp); - mdb_env_init_meta0(env, mm); - mm->mm_address = env->me_metas[0]->mm_address; - - mp = (MDB_page *)(my.mc_wbuf[0] + env->me_psize); - mp->mp_pgno = 1; - mp->mp_flags = P_META; - *(MDB_meta *)METADATA(mp) = *mm; - mm = (MDB_meta *)METADATA(mp); - - /* Count the number of free pages, subtract from lastpg to find - * number of active pages - */ - { - MDB_ID freecount = 0; - MDB_cursor mc; - MDB_val key, data; - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) - freecount += *(MDB_ID *)data.mv_data; - freecount += txn->mt_dbs[0].md_branch_pages + - txn->mt_dbs[0].md_leaf_pages + - txn->mt_dbs[0].md_overflow_pages; - - /* Set metapage 1 */ - mm->mm_last_pg = txn->mt_next_pgno - freecount - 1; - mm->mm_dbs[1] = txn->mt_dbs[1]; - mm->mm_dbs[1].md_root = mm->mm_last_pg; - mm->mm_txnid = 1; - } - my.mc_wlen[0] = env->me_psize * 2; - my.mc_txn = txn; - pthread_mutex_lock(&my.mc_mutex); - while(my.mc_new) - pthread_cond_wait(&my.mc_cond, &my.mc_mutex); - pthread_mutex_unlock(&my.mc_mutex); - rc = mdb_env_cwalk(&my, &txn->mt_dbs[1].md_root, 0); - if (rc == MDB_SUCCESS && my.mc_wlen[my.mc_toggle]) - rc = mdb_env_cthr_toggle(&my, 1); - mdb_env_cthr_toggle(&my, -1); - pthread_mutex_lock(&my.mc_mutex); - while(my.mc_new) - pthread_cond_wait(&my.mc_cond, &my.mc_mutex); - pthread_mutex_unlock(&my.mc_mutex); - THREAD_FINISH(thr); - - mdb_txn_abort(txn); -#ifdef _WIN32 - CloseHandle(my.mc_cond); - CloseHandle(my.mc_mutex); - _aligned_free(my.mc_wbuf[0]); -#else - pthread_cond_destroy(&my.mc_cond); - pthread_mutex_destroy(&my.mc_mutex); - free(my.mc_wbuf[0]); -#endif - return rc; -} - - /** Copy environment as-is. */ -static int ESECT -mdb_env_copyfd0(MDB_env *env, HANDLE fd) -{ - MDB_txn *txn = NULL; - int rc; - size_t wsize; - char *ptr; -#ifdef _WIN32 - DWORD len, w2; -#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) -#else - ssize_t len; - size_t w2; -#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) -#endif - - /* Do the lock/unlock of the reader mutex before starting the - * write txn. Otherwise other read txns could block writers. 
- */ - rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); - if (rc) - return rc; - - if (env->me_txns) { - /* We must start the actual read txn after blocking writers */ - mdb_txn_reset0(txn, "reset-stage1"); - - /* Temporarily block writers until we snapshot the meta pages */ - LOCK_MUTEX_W(env); - - rc = mdb_txn_renew0(txn); - if (rc) { - UNLOCK_MUTEX_W(env); - goto leave; - } - } - - wsize = env->me_psize * 2; - ptr = env->me_map; - w2 = wsize; - while (w2 > 0) { - DO_WRITE(rc, fd, ptr, w2, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - w2 -= len; - continue; - } else { - /* Non-blocking or async handles are not supported */ - rc = EIO; - break; - } - } - if (env->me_txns) - UNLOCK_MUTEX_W(env); - - if (rc) - goto leave; - - w2 = txn->mt_next_pgno * env->me_psize; -#ifdef WIN32 - { - LARGE_INTEGER fsize; - GetFileSizeEx(env->me_fd, &fsize); - if (w2 > fsize.QuadPart) - w2 = fsize.QuadPart; - } -#else - { - struct stat st; - fstat(env->me_fd, &st); - if (w2 > (size_t)st.st_size) - w2 = st.st_size; - } -#endif - wsize = w2 - wsize; - while (wsize > 0) { - if (wsize > MAX_WRITE) - w2 = MAX_WRITE; - else - w2 = wsize; - DO_WRITE(rc, fd, ptr, w2, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - wsize -= len; - continue; - } else { - rc = EIO; - break; - } - } - -leave: - mdb_txn_abort(txn); - return rc; -} - -int ESECT -mdb_env_copyfd2(MDB_env *env, HANDLE fd, unsigned int flags) -{ - if (flags & MDB_CP_COMPACT) - return mdb_env_copyfd1(env, fd); - else - return mdb_env_copyfd0(env, fd); -} - -int ESECT -mdb_env_copyfd(MDB_env *env, HANDLE fd) -{ - return mdb_env_copyfd2(env, fd, 0); -} - -int ESECT -mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags) -{ - int rc, len; - char *lpath; - HANDLE newfd = INVALID_HANDLE_VALUE; - - if (env->me_flags & MDB_NOSUBDIR) { - lpath = (char *)path; - } else { - len = strlen(path); - len += sizeof(DATANAME); - lpath = malloc(len); - if (!lpath) - return ENOMEM; - sprintf(lpath, "%s" DATANAME, path); - } - - /* The destination path must exist, but the destination file must not. - * We don't want the OS to cache the writes, since the source data is - * already in the OS cache. 
- */ -#ifdef _WIN32 - newfd = CreateFile(lpath, GENERIC_WRITE, 0, NULL, CREATE_NEW, - FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH, NULL); -#else - newfd = open(lpath, O_WRONLY|O_CREAT|O_EXCL, 0666); -#endif - if (newfd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - goto leave; - } - - if (env->me_psize >= env->me_os_psize) { -#ifdef O_DIRECT - /* Set O_DIRECT if the file system supports it */ - if ((rc = fcntl(newfd, F_GETFL)) != -1) - (void) fcntl(newfd, F_SETFL, rc | O_DIRECT); -#endif -#ifdef F_NOCACHE /* __APPLE__ */ - rc = fcntl(newfd, F_NOCACHE, 1); - if (rc) { - rc = ErrCode(); - goto leave; - } -#endif - } - - rc = mdb_env_copyfd2(env, newfd, flags); - -leave: - if (!(env->me_flags & MDB_NOSUBDIR)) - free(lpath); - if (newfd != INVALID_HANDLE_VALUE) - if (close(newfd) < 0 && rc == MDB_SUCCESS) - rc = ErrCode(); - - return rc; -} - -int ESECT -mdb_env_copy(MDB_env *env, const char *path) -{ - return mdb_env_copy2(env, path, 0); -} - -int ESECT -mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff) -{ - if ((flag & CHANGEABLE) != flag) - return EINVAL; - if (onoff) - env->me_flags |= flag; - else - env->me_flags &= ~flag; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_flags(MDB_env *env, unsigned int *arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_flags; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_userctx(MDB_env *env, void *ctx) -{ - if (!env) - return EINVAL; - env->me_userctx = ctx; - return MDB_SUCCESS; -} - -void * ESECT -mdb_env_get_userctx(MDB_env *env) -{ - return env ? env->me_userctx : NULL; -} - -int ESECT -mdb_env_set_assert(MDB_env *env, MDB_assert_func *func) -{ - if (!env) - return EINVAL; -#ifndef NDEBUG - env->me_assert_func = func; -#endif - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_path(MDB_env *env, const char **arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_path; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_fd; - return MDB_SUCCESS; -} - -/** Common code for #mdb_stat() and #mdb_env_stat(). - * @param[in] env the environment to operate in. - * @param[in] db the #MDB_db record containing the stats to return. - * @param[out] arg the address of an #MDB_stat structure to receive the stats. - * @return 0, this function always succeeds. - */ -static int ESECT -mdb_stat0(MDB_env *env, MDB_db *db, MDB_stat *arg) -{ - arg->ms_psize = env->me_psize; - arg->ms_depth = db->md_depth; - arg->ms_branch_pages = db->md_branch_pages; - arg->ms_leaf_pages = db->md_leaf_pages; - arg->ms_overflow_pages = db->md_overflow_pages; - arg->ms_entries = db->md_entries; - - return MDB_SUCCESS; -} - -int ESECT -mdb_env_stat(MDB_env *env, MDB_stat *arg) -{ - int toggle; - - if (env == NULL || arg == NULL) - return EINVAL; - - toggle = mdb_env_pick_meta(env); - - return mdb_stat0(env, &env->me_metas[toggle]->mm_dbs[MAIN_DBI], arg); -} - -int ESECT -mdb_env_info(MDB_env *env, MDB_envinfo *arg) -{ - int toggle; - - if (env == NULL || arg == NULL) - return EINVAL; - - toggle = mdb_env_pick_meta(env); - arg->me_mapaddr = env->me_metas[toggle]->mm_address; - arg->me_mapsize = env->me_mapsize; - arg->me_maxreaders = env->me_maxreaders; - - /* me_numreaders may be zero if this process never used any readers. Use - * the shared numreader count if it exists. - */ - arg->me_numreaders = env->me_txns ? 
env->me_txns->mti_numreaders : env->me_numreaders; - - arg->me_last_pgno = env->me_metas[toggle]->mm_last_pg; - arg->me_last_txnid = env->me_metas[toggle]->mm_txnid; - return MDB_SUCCESS; -} - -/** Set the default comparison functions for a database. - * Called immediately after a database is opened to set the defaults. - * The user can then override them with #mdb_set_compare() or - * #mdb_set_dupsort(). - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - */ -static void -mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi) -{ - uint16_t f = txn->mt_dbs[dbi].md_flags; - - txn->mt_dbxs[dbi].md_cmp = - (f & MDB_REVERSEKEY) ? mdb_cmp_memnr : - (f & MDB_INTEGERKEY) ? mdb_cmp_cint : mdb_cmp_memn; - - txn->mt_dbxs[dbi].md_dcmp = - !(f & MDB_DUPSORT) ? 0 : - ((f & MDB_INTEGERDUP) - ? ((f & MDB_DUPFIXED) ? mdb_cmp_int : mdb_cmp_cint) - : ((f & MDB_REVERSEDUP) ? mdb_cmp_memnr : mdb_cmp_memn)); -} - -int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi) -{ - MDB_val key, data; - MDB_dbi i; - MDB_cursor mc; - MDB_db dummy; - int rc, dbflag, exact; - unsigned int unused = 0, seq; - size_t len; - - if (txn->mt_dbxs[FREE_DBI].md_cmp == NULL) { - mdb_default_cmp(txn, FREE_DBI); - } - - if ((flags & VALID_FLAGS) != flags) - return EINVAL; - if (txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - /* main DB? */ - if (!name) { - *dbi = MAIN_DBI; - if (flags & PERSISTENT_FLAGS) { - uint16_t f2 = flags & PERSISTENT_FLAGS; - /* make sure flag changes get committed */ - if ((txn->mt_dbs[MAIN_DBI].md_flags | f2) != txn->mt_dbs[MAIN_DBI].md_flags) { - txn->mt_dbs[MAIN_DBI].md_flags |= f2; - txn->mt_flags |= MDB_TXN_DIRTY; - } - } - mdb_default_cmp(txn, MAIN_DBI); - return MDB_SUCCESS; - } - - if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) { - mdb_default_cmp(txn, MAIN_DBI); - } - - /* Is the DB already open? */ - len = strlen(name); - for (i=2; i<txn->mt_numdbs; i++) { - if (!txn->mt_dbxs[i].md_name.mv_size) { - /* Remember this free slot */ - if (!unused) unused = i; - continue; - } - if (len == txn->mt_dbxs[i].md_name.mv_size && - !strncmp(name, txn->mt_dbxs[i].md_name.mv_data, len)) { - *dbi = i; - return MDB_SUCCESS; - } - } - - /* If no free slot and max hit, fail */ - if (!unused && txn->mt_numdbs >= txn->mt_env->me_maxdbs) - return MDB_DBS_FULL; - - /* Cannot mix named databases with some mainDB flags */ - if (txn->mt_dbs[MAIN_DBI].md_flags & (MDB_DUPSORT|MDB_INTEGERKEY)) - return (flags & MDB_CREATE) ? MDB_INCOMPATIBLE : MDB_NOTFOUND; - - /* Find the DB info */ - dbflag = DB_NEW|DB_VALID; - exact = 0; - key.mv_size = len; - key.mv_data = (void *)name; - mdb_cursor_init(&mc, txn, MAIN_DBI, NULL); - rc = mdb_cursor_set(&mc, &key, &data, MDB_SET, &exact); - if (rc == MDB_SUCCESS) { - /* make sure this is actually a DB */ - MDB_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]); - if (!(node->mn_flags & F_SUBDATA)) - return MDB_INCOMPATIBLE; - } else if (rc == MDB_NOTFOUND && (flags & MDB_CREATE)) { - /* Create if requested */ - data.mv_size = sizeof(MDB_db); - data.mv_data = &dummy; - memset(&dummy, 0, sizeof(dummy)); - dummy.md_root = P_INVALID; - dummy.md_flags = flags & PERSISTENT_FLAGS; - rc = mdb_cursor_put(&mc, &key, &data, F_SUBDATA); - dbflag |= DB_DIRTY; - } - - /* OK, got info, add to table */ - if (rc == MDB_SUCCESS) { - unsigned int slot = unused ? 
unused : txn->mt_numdbs; - txn->mt_dbxs[slot].md_name.mv_data = strdup(name); - txn->mt_dbxs[slot].md_name.mv_size = len; - txn->mt_dbxs[slot].md_rel = NULL; - txn->mt_dbflags[slot] = dbflag; - /* txn-> and env-> are the same in read txns, use - * tmp variable to avoid undefined assignment - */ - seq = ++txn->mt_env->me_dbiseqs[slot]; - txn->mt_dbiseqs[slot] = seq; - - memcpy(&txn->mt_dbs[slot], data.mv_data, sizeof(MDB_db)); - *dbi = slot; - mdb_default_cmp(txn, slot); - if (!unused) { - txn->mt_numdbs++; - } - } - - return rc; -} - -int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg) -{ - if (!arg || !TXN_DBI_EXIST(txn, dbi)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_ERROR) - return MDB_BAD_TXN; - - if (txn->mt_dbflags[dbi] & DB_STALE) { - MDB_cursor mc; - MDB_xcursor mx; - /* Stale, must read the DB's root. cursor_init does it for us. */ - mdb_cursor_init(&mc, txn, dbi, &mx); - } - return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg); -} - -void mdb_dbi_close(MDB_env *env, MDB_dbi dbi) -{ - char *ptr; - if (dbi <= MAIN_DBI || dbi >= env->me_maxdbs) - return; - ptr = env->me_dbxs[dbi].md_name.mv_data; - /* If there was no name, this was already closed */ - if (ptr) { - env->me_dbxs[dbi].md_name.mv_data = NULL; - env->me_dbxs[dbi].md_name.mv_size = 0; - env->me_dbflags[dbi] = 0; - env->me_dbiseqs[dbi]++; - free(ptr); - } -} - -int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags) -{ - /* We could return the flags for the FREE_DBI too but what's the point? */ - if (dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi)) - return EINVAL; - *flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS; - return MDB_SUCCESS; -} - -/** Add all the DB's pages to the free list. - * @param[in] mc Cursor on the DB to free. - * @param[in] subs non-Zero to check for sub-DBs in this DB. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_drop0(MDB_cursor *mc, int subs) -{ - int rc; - - rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); - if (rc == MDB_SUCCESS) { - MDB_txn *txn = mc->mc_txn; - MDB_node *ni; - MDB_cursor mx; - unsigned int i; - - /* LEAF2 pages have no nodes, cannot have sub-DBs */ - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) - mdb_cursor_pop(mc); - - mdb_cursor_copy(mc, &mx); - while (mc->mc_snum > 0) { - MDB_page *mp = mc->mc_pg[mc->mc_top]; - unsigned n = NUMKEYS(mp); - if (IS_LEAF(mp)) { - for (i=0; i<n; i++) { - ni = NODEPTR(mp, i); - if (ni->mn_flags & F_BIGDATA) { - MDB_page *omp; - pgno_t pg; - memcpy(&pg, NODEDATA(ni), sizeof(pg)); - rc = mdb_page_get(txn, pg, &omp, NULL); - if (rc != 0) - goto done; - mdb_cassert(mc, IS_OVERFLOW(omp)); - rc = mdb_midl_append_range(&txn->mt_free_pgs, - pg, omp->mp_pages); - if (rc) - goto done; - } else if (subs && (ni->mn_flags & F_SUBDATA)) { - mdb_xcursor_init1(mc, ni); - rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); - if (rc) - goto done; - } - } - } else { - if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0) - goto done; - for (i=0; i<n; i++) { - pgno_t pg; - ni = NODEPTR(mp, i); - pg = NODEPGNO(ni); - /* free it */ - mdb_midl_xappend(txn->mt_free_pgs, pg); - } - } - if (!mc->mc_top) - break; - mc->mc_ki[mc->mc_top] = i; - rc = mdb_cursor_sibling(mc, 1); - if (rc) { - if (rc != MDB_NOTFOUND) - goto done; - /* no more siblings, go back to beginning - * of previous level. 
-void mdb_dbi_close(MDB_env *env, MDB_dbi dbi)
-{
-	char *ptr;
-	if (dbi <= MAIN_DBI || dbi >= env->me_maxdbs)
-		return;
-	ptr = env->me_dbxs[dbi].md_name.mv_data;
-	/* If there was no name, this was already closed */
-	if (ptr) {
-		env->me_dbxs[dbi].md_name.mv_data = NULL;
-		env->me_dbxs[dbi].md_name.mv_size = 0;
-		env->me_dbflags[dbi] = 0;
-		env->me_dbiseqs[dbi]++;
-		free(ptr);
-	}
-}
-
-int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags)
-{
-	/* We could return the flags for the FREE_DBI too but what's the point? */
-	if (dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi))
-		return EINVAL;
-	*flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS;
-	return MDB_SUCCESS;
-}
-
-/** Add all the DB's pages to the free list.
- * @param[in] mc Cursor on the DB to free.
- * @param[in] subs non-Zero to check for sub-DBs in this DB.
- * @return 0 on success, non-zero on failure.
- */
-static int
-mdb_drop0(MDB_cursor *mc, int subs)
-{
-	int rc;
-
-	rc = mdb_page_search(mc, NULL, MDB_PS_FIRST);
-	if (rc == MDB_SUCCESS) {
-		MDB_txn *txn = mc->mc_txn;
-		MDB_node *ni;
-		MDB_cursor mx;
-		unsigned int i;
-
-		/* LEAF2 pages have no nodes, cannot have sub-DBs */
-		if (IS_LEAF2(mc->mc_pg[mc->mc_top]))
-			mdb_cursor_pop(mc);
-
-		mdb_cursor_copy(mc, &mx);
-		while (mc->mc_snum > 0) {
-			MDB_page *mp = mc->mc_pg[mc->mc_top];
-			unsigned n = NUMKEYS(mp);
-			if (IS_LEAF(mp)) {
-				for (i=0; i<n; i++) {
-					ni = NODEPTR(mp, i);
-					if (ni->mn_flags & F_BIGDATA) {
-						MDB_page *omp;
-						pgno_t pg;
-						memcpy(&pg, NODEDATA(ni), sizeof(pg));
-						rc = mdb_page_get(txn, pg, &omp, NULL);
-						if (rc != 0)
-							goto done;
-						mdb_cassert(mc, IS_OVERFLOW(omp));
-						rc = mdb_midl_append_range(&txn->mt_free_pgs,
-							pg, omp->mp_pages);
-						if (rc)
-							goto done;
-					} else if (subs && (ni->mn_flags & F_SUBDATA)) {
-						mdb_xcursor_init1(mc, ni);
-						rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0);
-						if (rc)
-							goto done;
-					}
-				}
-			} else {
-				if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0)
-					goto done;
-				for (i=0; i<n; i++) {
-					pgno_t pg;
-					ni = NODEPTR(mp, i);
-					pg = NODEPGNO(ni);
-					/* free it */
-					mdb_midl_xappend(txn->mt_free_pgs, pg);
-				}
-			}
-			if (!mc->mc_top)
-				break;
-			mc->mc_ki[mc->mc_top] = i;
-			rc = mdb_cursor_sibling(mc, 1);
-			if (rc) {
-				if (rc != MDB_NOTFOUND)
-					goto done;
-				/* no more siblings, go back to beginning
-				 * of previous level.
-				 */
-				mdb_cursor_pop(mc);
-				mc->mc_ki[0] = 0;
-				for (i=1; i<mc->mc_snum; i++) {
-					mc->mc_ki[i] = 0;
-					mc->mc_pg[i] = mx.mc_pg[i];
-				}
-			}
-		}
-		/* free it */
-		rc = mdb_midl_append(&txn->mt_free_pgs, mc->mc_db->md_root);
-done:
-		if (rc)
-			txn->mt_flags |= MDB_TXN_ERROR;
-	} else if (rc == MDB_NOTFOUND) {
-		rc = MDB_SUCCESS;
-	}
-	return rc;
-}
-
-int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del)
-{
-	MDB_cursor *mc, *m2;
-	int rc;
-
-	if ((unsigned)del > 1 || dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi))
-		return EINVAL;
-
-	if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
-		return EACCES;
-
-	if (dbi > MAIN_DBI && TXN_DBI_CHANGED(txn, dbi))
-		return MDB_BAD_DBI;
-
-	rc = mdb_cursor_open(txn, dbi, &mc);
-	if (rc)
-		return rc;
-
-	rc = mdb_drop0(mc, mc->mc_db->md_flags & MDB_DUPSORT);
-	/* Invalidate the dropped DB's cursors */
-	for (m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next)
-		m2->mc_flags &= ~(C_INITIALIZED|C_EOF);
-	if (rc)
-		goto leave;
-
-	/* Can't delete the main DB */
-	if (del && dbi > MAIN_DBI) {
-		rc = mdb_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, 0);
-		if (!rc) {
-			txn->mt_dbflags[dbi] = DB_STALE;
-			mdb_dbi_close(txn->mt_env, dbi);
-		} else {
-			txn->mt_flags |= MDB_TXN_ERROR;
-		}
-	} else {
-		/* reset the DB record, mark it dirty */
-		txn->mt_dbflags[dbi] |= DB_DIRTY;
-		txn->mt_dbs[dbi].md_depth = 0;
-		txn->mt_dbs[dbi].md_branch_pages = 0;
-		txn->mt_dbs[dbi].md_leaf_pages = 0;
-		txn->mt_dbs[dbi].md_overflow_pages = 0;
-		txn->mt_dbs[dbi].md_entries = 0;
-		txn->mt_dbs[dbi].md_root = P_INVALID;
-
-		txn->mt_flags |= MDB_TXN_DIRTY;
-	}
-leave:
-	mdb_cursor_close(mc);
-	return rc;
-}
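mdb_drop's del flag picks between truncating and deleting a database, and Txn.Drop (txn.go hunk below) passes it straight through. A sketch under the same assumptions as the earlier ones:

// del=0 empties the DB but keeps the handle usable; del=1 also deletes it
// from the main DB and closes the handle, so dbi must not be reused after.
func clearDB(txn *mdb.Txn, dbi mdb.DBI, deleteIt bool) error {
	del := 0
	if deleteIt {
		del = 1
	}
	return txn.Drop(dbi, del)
}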
-
-int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp)
-{
-	if (dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi))
-		return EINVAL;
-
-	txn->mt_dbxs[dbi].md_cmp = cmp;
-	return MDB_SUCCESS;
-}
-
-int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp)
-{
-	if (dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi))
-		return EINVAL;
-
-	txn->mt_dbxs[dbi].md_dcmp = cmp;
-	return MDB_SUCCESS;
-}
-
-int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel)
-{
-	if (dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi))
-		return EINVAL;
-
-	txn->mt_dbxs[dbi].md_rel = rel;
-	return MDB_SUCCESS;
-}
-
-int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx)
-{
-	if (dbi == FREE_DBI || !TXN_DBI_EXIST(txn, dbi))
-		return EINVAL;
-
-	txn->mt_dbxs[dbi].md_relctx = ctx;
-	return MDB_SUCCESS;
-}
-
-int ESECT
-mdb_env_get_maxkeysize(MDB_env *env)
-{
-	return ENV_MAXKEY(env);
-}
-
-int ESECT
-mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx)
-{
-	unsigned int i, rdrs;
-	MDB_reader *mr;
-	char buf[64];
-	int rc = 0, first = 1;
-
-	if (!env || !func)
-		return -1;
-	if (!env->me_txns) {
-		return func("(no reader locks)\n", ctx);
-	}
-	rdrs = env->me_txns->mti_numreaders;
-	mr = env->me_txns->mti_readers;
-	for (i=0; i<rdrs; i++) {
-		if (mr[i].mr_pid) {
-			txnid_t txnid = mr[i].mr_txnid;
-			sprintf(buf, txnid == (txnid_t)-1 ?
-				"%10d %"Z"x -\n" : "%10d %"Z"x %"Z"u\n",
-				(int)mr[i].mr_pid, (size_t)mr[i].mr_tid, txnid);
-			if (first) {
-				first = 0;
-				rc = func("    pid     thread     txnid\n", ctx);
-				if (rc < 0)
-					break;
-			}
-			rc = func(buf, ctx);
-			if (rc < 0)
-				break;
-		}
-	}
-	if (first) {
-		rc = func("(no active readers)\n", ctx);
-	}
-	return rc;
-}
-
-/** Insert pid into list if not already present.
- * return -1 if already present.
- */
-static int ESECT
-mdb_pid_insert(MDB_PID_T *ids, MDB_PID_T pid)
-{
-	/* binary search of pid in list */
-	unsigned base = 0;
-	unsigned cursor = 1;
-	int val = 0;
-	unsigned n = ids[0];
-
-	while( 0 < n ) {
-		unsigned pivot = n >> 1;
-		cursor = base + pivot + 1;
-		val = pid - ids[cursor];
-
-		if( val < 0 ) {
-			n = pivot;
-
-		} else if ( val > 0 ) {
-			base = cursor;
-			n -= pivot + 1;
-
-		} else {
-			/* found, so it's a duplicate */
-			return -1;
-		}
-	}
-
-	if( val > 0 ) {
-		++cursor;
-	}
-	ids[0]++;
-	for (n = ids[0]; n > cursor; n--)
-		ids[n] = ids[n-1];
-	ids[n] = pid;
-	return 0;
-}
-
-int ESECT
-mdb_reader_check(MDB_env *env, int *dead)
-{
-	unsigned int i, j, rdrs;
-	MDB_reader *mr;
-	MDB_PID_T *pids, pid;
-	int count = 0;
-
-	if (!env)
-		return EINVAL;
-	if (dead)
-		*dead = 0;
-	if (!env->me_txns)
-		return MDB_SUCCESS;
-	rdrs = env->me_txns->mti_numreaders;
-	pids = malloc((rdrs+1) * sizeof(MDB_PID_T));
-	if (!pids)
-		return ENOMEM;
-	pids[0] = 0;
-	mr = env->me_txns->mti_readers;
-	for (i=0; i<rdrs; i++) {
-		if (mr[i].mr_pid && mr[i].mr_pid != env->me_pid) {
-			pid = mr[i].mr_pid;
-			if (mdb_pid_insert(pids, pid) == 0) {
-				if (!mdb_reader_pid(env, Pidcheck, pid)) {
-					LOCK_MUTEX_R(env);
-					/* Recheck, a new process may have reused pid */
-					if (!mdb_reader_pid(env, Pidcheck, pid)) {
-						for (j=i; j<rdrs; j++)
-							if (mr[j].mr_pid == pid) {
-								mr[j].mr_pid = 0;
-								count++;
-							}
-					}
-					UNLOCK_MUTEX_R(env);
-				}
-			}
-		}
-	}
-	free(pids);
-	if (dead)
-		*dead = count;
-	return MDB_SUCCESS;
-}

%d", stat.Entries, num_entries)
-	}
-	txn, err = env.BeginTxn(nil, 0)
-	if err != nil {
-		t.Fatalf("Cannot begin transaction: %s", err)
-	}
-	var cursor *Cursor
-	cursor, err = txn.CursorOpen(dbi)
-	if err != nil {
-		cursor.Close()
-		txn.Abort()
-		t.Fatalf("Error during cursor open %s", err)
-	}
-	var bkey, bval []byte
-	var rc error
-	for {
-		bkey, bval, rc = cursor.Get(nil, nil, NEXT)
-		if rc != nil {
-			break
-		}
-		skey := string(bkey)
-		sval := string(bval)
-		t.Logf("Val: %s", sval)
-		t.Logf("Key: %s", skey)
-		var d string
-		var ok bool
-		if d, ok = data[skey]; !ok {
-			t.Errorf("Cannot find: %q", skey)
-		}
-		if d != sval {
-			t.Errorf("Data mismatch: %q <> %q", sval, d)
-		}
-	}
-	cursor.Close()
-	bval, err = txn.Get(dbi, []byte("Key-0"))
-	if err != nil {
-		txn.Abort()
-		t.Fatalf("Error during txn get %s", err)
-	}
-	if string(bval) != "Val-0" {
-		txn.Abort()
-		t.Fatalf("Invalid txn get %s", string(bval))
-	}
-	txn.Abort()
-}
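The scan pattern in the test fragment above is the usual way to iterate with this binding. Distilled into a helper (hedged: same import assumptions as the earlier sketches; any cursor error, including MDB_NOTFOUND at the end of the keyspace, ends the scan here):

// forEach walks every key/value pair of a DB with MDB_NEXT.
func forEach(txn *mdb.Txn, dbi mdb.DBI, fn func(k, v []byte)) error {
	cur, err := txn.CursorOpen(dbi)
	if err != nil {
		return err
	}
	defer cur.Close()
	for {
		k, v, err := cur.Get(nil, nil, mdb.NEXT)
		if err != nil {
			return nil // end of iteration (or first cursor error)
		}
		fn(k, v)
	}
}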
diff --git a/vendor/gomdb/midl.c b/vendor/gomdb/midl.c
deleted file mode 100644
index bc2e51e..0000000
--- a/vendor/gomdb/midl.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// +build lmdb
-
-/** @file midl.c
- * @brief ldap bdb back-end ID List functions */
-/* $OpenLDAP$ */
-/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
- *
- * Copyright 2000-2014 The OpenLDAP Foundation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted only as authorized by the OpenLDAP
- * Public License.
- *
- * A copy of this license is available in the file LICENSE in the
- * top-level directory of the distribution or, alternatively, at
- * <http://www.OpenLDAP.org/license.html>.
- */
-
-#include <string.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <assert.h>
-#include "midl.h"
-
-/** @defgroup internal LMDB Internals
- * @{
- */
-/** @defgroup idls ID List Management
- * @{
- */
-#define CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) )
-
-unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
-{
-	/*
-	 * binary search of id in ids
-	 * if found, returns position of id
-	 * if not found, returns first position greater than id
-	 */
-	unsigned base = 0;
-	unsigned cursor = 1;
-	int val = 0;
-	unsigned n = ids[0];
-
-	while( 0 < n ) {
-		unsigned pivot = n >> 1;
-		cursor = base + pivot + 1;
-		val = CMP( ids[cursor], id );
-
-		if( val < 0 ) {
-			n = pivot;
-
-		} else if ( val > 0 ) {
-			base = cursor;
-			n -= pivot + 1;
-
-		} else {
-			return cursor;
-		}
-	}
-
-	if( val > 0 ) {
-		++cursor;
-	}
-	return cursor;
-}
-
-#if 0	/* superseded by append/sort */
-int mdb_midl_insert( MDB_IDL ids, MDB_ID id )
-{
-	unsigned x, i;
-
-	x = mdb_midl_search( ids, id );
-	assert( x > 0 );
-
-	if( x < 1 ) {
-		/* internal error */
-		return -2;
-	}
-
-	if ( x <= ids[0] && ids[x] == id ) {
-		/* duplicate */
-		assert(0);
-		return -1;
-	}
-
-	if ( ++ids[0] >= MDB_IDL_DB_MAX ) {
-		/* no room */
-		--ids[0];
-		return -2;
-
-	} else {
-		/* insert id */
-		for (i=ids[0]; i>x; i--)
-			ids[i] = ids[i-1];
-		ids[x] = id;
-	}
-
-	return 0;
-}
-#endif
-
-MDB_IDL mdb_midl_alloc(int num)
-{
-	MDB_IDL ids = malloc((num+2) * sizeof(MDB_ID));
-	if (ids) {
-		*ids++ = num;
-		*ids = 0;
-	}
-	return ids;
-}
-
-void mdb_midl_free(MDB_IDL ids)
-{
-	if (ids)
-		free(ids-1);
-}
-
-int mdb_midl_shrink( MDB_IDL *idp )
-{
-	MDB_IDL ids = *idp;
-	if (*(--ids) > MDB_IDL_UM_MAX &&
-		(ids = realloc(ids, (MDB_IDL_UM_MAX+1) * sizeof(MDB_ID))))
-	{
-		*ids++ = MDB_IDL_UM_MAX;
-		*idp = ids;
-		return 1;
-	}
-	return 0;
-}
-
-static int mdb_midl_grow( MDB_IDL *idp, int num )
-{
-	MDB_IDL idn = *idp-1;
-	/* grow it */
-	idn = realloc(idn, (*idn + num + 2) * sizeof(MDB_ID));
-	if (!idn)
-		return ENOMEM;
-	*idn++ += num;
-	*idp = idn;
-	return 0;
-}
-
-int mdb_midl_need( MDB_IDL *idp, unsigned num )
-{
-	MDB_IDL ids = *idp;
-	num += ids[0];
-	if (num > ids[-1]) {
-		num = (num + num/4 + (256 + 2)) & -256;
-		if (!(ids = realloc(ids-1, num * sizeof(MDB_ID))))
-			return ENOMEM;
-		*ids++ = num - 2;
-		*idp = ids;
-	}
-	return 0;
-}
-
-int mdb_midl_append( MDB_IDL *idp, MDB_ID id )
-{
-	MDB_IDL ids = *idp;
-	/* Too big? */
-	if (ids[0] >= ids[-1]) {
-		if (mdb_midl_grow(idp, MDB_IDL_UM_MAX))
-			return ENOMEM;
-		ids = *idp;
-	}
-	ids[0]++;
-	ids[ids[0]] = id;
-	return 0;
-}
-
-int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app )
-{
-	MDB_IDL ids = *idp;
-	/* Too big? */
-	if (ids[0] + app[0] >= ids[-1]) {
-		if (mdb_midl_grow(idp, app[0]))
-			return ENOMEM;
-		ids = *idp;
-	}
-	memcpy(&ids[ids[0]+1], &app[1], app[0] * sizeof(MDB_ID));
-	ids[0] += app[0];
-	return 0;
-}
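For readers unfamiliar with the MDB_IDL convention the append functions above rely on: the list keeps its live length in ids[0] and its capacity in the hidden word at ids[-1]. A Go illustration of the same append-with-grow shape (illustration only, not part of the binding; Go slices make the hidden capacity word unnecessary):

// IDL mimics the C layout: idl[0] holds the count, elements live at 1..count.
type IDL []uint64

func idlAppend(idl IDL, id uint64) IDL {
	n := idl[0] + 1
	if int(n) >= len(idl) { // "Too big?": grow, as mdb_midl_grow does
		grown := make(IDL, 2*len(idl))
		copy(grown, idl)
		idl = grown
	}
	idl[n] = id
	idl[0] = n
	return idl
}

mdb_midl_need's (num + num/4 + 256)-style sizing plays the same amortization role as the doubling here.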
-
-int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n )
-{
-	MDB_ID *ids = *idp, len = ids[0];
-	/* Too big? */
-	if (len + n > ids[-1]) {
-		if (mdb_midl_grow(idp, n | MDB_IDL_UM_MAX))
-			return ENOMEM;
-		ids = *idp;
-	}
-	ids[0] = len + n;
-	ids += len;
-	while (n)
-		ids[n--] = id++;
-	return 0;
-}
-
-void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge )
-{
-	MDB_ID old_id, merge_id, i = merge[0], j = idl[0], k = i+j, total = k;
-	idl[0] = (MDB_ID)-1;		/* delimiter for idl scan below */
-	old_id = idl[j];
-	while (i) {
-		merge_id = merge[i--];
-		for (; old_id < merge_id; old_id = idl[--j])
-			idl[k--] = old_id;
-		idl[k--] = merge_id;
-	}
-	idl[0] = total;
-}
-
-/* Quicksort + Insertion sort for small arrays */
-
-#define SMALL	8
-#define MIDL_SWAP(a,b)	{ itmp=(a); (a)=(b); (b)=itmp; }
-
-void
-mdb_midl_sort( MDB_IDL ids )
-{
-	/* Max possible depth of int-indexed tree * 2 items/level */
-	int istack[sizeof(int)*CHAR_BIT * 2];
-	int i,j,k,l,ir,jstack;
-	MDB_ID a, itmp;
-
-	ir = (int)ids[0];
-	l = 1;
-	jstack = 0;
-	for(;;) {
-		if (ir - l < SMALL) {	/* Insertion sort */
-			for (j=l+1;j<=ir;j++) {
-				a = ids[j];
-				for (i=j-1;i>=1;i--) {
-					if (ids[i] >= a) break;
-					ids[i+1] = ids[i];
-				}
-				ids[i+1] = a;
-			}
-			if (jstack == 0) break;
-			ir = istack[jstack--];
-			l = istack[jstack--];
-		} else {
-			k = (l + ir) >> 1;	/* Choose median of left, center, right */
-			MIDL_SWAP(ids[k], ids[l+1]);
-			if (ids[l] < ids[ir]) {
-				MIDL_SWAP(ids[l], ids[ir]);
-			}
-			if (ids[l+1] < ids[ir]) {
-				MIDL_SWAP(ids[l+1], ids[ir]);
-			}
-			if (ids[l] < ids[l+1]) {
-				MIDL_SWAP(ids[l], ids[l+1]);
-			}
-			i = l+1;
-			j = ir;
-			a = ids[l+1];
-			for(;;) {
-				do i++; while(ids[i] > a);
-				do j--; while(ids[j] < a);
-				if (j < i) break;
-				MIDL_SWAP(ids[i],ids[j]);
-			}
-			ids[l+1] = ids[j];
-			ids[j] = a;
-			jstack += 2;
-			if (ir-i+1 >= j-l) {
-				istack[jstack] = ir;
-				istack[jstack-1] = i;
-				ir = j-1;
-			} else {
-				istack[jstack] = j-1;
-				istack[jstack-1] = l;
-				l = i;
-			}
-		}
-	}
-}
-
-unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id )
-{
-	/*
-	 * binary search of id in ids
-	 * if found, returns position of id
-	 * if not found, returns first position greater than id
-	 */
-	unsigned base = 0;
-	unsigned cursor = 1;
-	int val = 0;
-	unsigned n = (unsigned)ids[0].mid;
-
-	while( 0 < n ) {
-		unsigned pivot = n >> 1;
-		cursor = base + pivot + 1;
-		val = CMP( id, ids[cursor].mid );
-
-		if( val < 0 ) {
-			n = pivot;
-
-		} else if ( val > 0 ) {
-			base = cursor;
-			n -= pivot + 1;
-
-		} else {
-			return cursor;
-		}
-	}
-
-	if( val > 0 ) {
-		++cursor;
-	}
-	return cursor;
-}
-
-int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id )
-{
-	unsigned x, i;
-
-	x = mdb_mid2l_search( ids, id->mid );
-
-	if( x < 1 ) {
-		/* internal error */
-		return -2;
-	}
-
-	if ( x <= ids[0].mid && ids[x].mid == id->mid ) {
-		/* duplicate */
-		return -1;
-	}
-
-	if ( ids[0].mid >= MDB_IDL_UM_MAX ) {
-		/* too big */
-		return -2;
-
-	} else {
-		/* insert id */
-		ids[0].mid++;
-		for (i=(unsigned)ids[0].mid; i>x; i--)
-			ids[i] = ids[i-1];
-		ids[x] = *id;
-	}
-
-	return 0;
-}
-
-int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id )
-{
-	/* Too big? */
-	if (ids[0].mid >= MDB_IDL_UM_MAX) {
-		return -2;
-	}
-	ids[0].mid++;
-	ids[ids[0].mid] = *id;
-	return 0;
-}
-
-/** @} */
-/** @} */
diff --git a/vendor/gomdb/midl.h b/vendor/gomdb/midl.h
deleted file mode 100644
index d17e0ff..0000000
--- a/vendor/gomdb/midl.h
+++ /dev/null
@@ -1,188 +0,0 @@
-// +build lmdb
-
-/** @file midl.h
- * @brief LMDB ID List header file.
- *
- * This file was originally part of back-bdb but has been
- * modified for use in libmdb. Most of the macros defined
- * in this file are unused, just left over from the original.
- *
- * This file is only used internally in libmdb and its definitions
- * are not exposed publicly.
- */
-/* $OpenLDAP$ */
-/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
- *
- * Copyright 2000-2014 The OpenLDAP Foundation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted only as authorized by the OpenLDAP
- * Public License.
- *
- * A copy of this license is available in the file LICENSE in the
- * top-level directory of the distribution or, alternatively, at
- * <http://www.OpenLDAP.org/license.html>.
- */
-
-#ifndef _MDB_MIDL_H_
-#define _MDB_MIDL_H_
-
-#include <stddef.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @defgroup internal LMDB Internals
- * @{
- */
-
-/** @defgroup idls ID List Management
- * @{
- */
-	/** A generic unsigned ID number. These were entryIDs in back-bdb.
-	 * Preferably it should have the same size as a pointer.
-	 */
-typedef size_t MDB_ID;
-
-	/** An IDL is an ID List, a sorted array of IDs. The first
-	 * element of the array is a counter for how many actual
-	 * IDs are in the list. In the original back-bdb code, IDLs are
-	 * sorted in ascending order. For libmdb IDLs are sorted in
-	 * descending order.
-	 */
-typedef MDB_ID *MDB_IDL;
-
-/* IDL sizes - likely should be even bigger
- * limiting factors: sizeof(ID), thread stack size
- */
-#define MDB_IDL_LOGN	16	/* DB_SIZE is 2^16, UM_SIZE is 2^17 */
-#define MDB_IDL_DB_SIZE		(1<<MDB_IDL_LOGN)

-#include <stdio.h>
-#include "lmdb.h"
-*/
-import "C"
-
-import (
-	"math"
-	"runtime"
-	"unsafe"
-)
-
-// DBIOpen Database Flags
-const (
-	REVERSEKEY = C.MDB_REVERSEKEY // use reverse string keys
-	DUPSORT    = C.MDB_DUPSORT    // use sorted duplicates
-	INTEGERKEY = C.MDB_INTEGERKEY // numeric keys in native byte order. The keys must all be of the same size.
-	DUPFIXED   = C.MDB_DUPFIXED   // with DUPSORT, sorted dup items have fixed size
-	INTEGERDUP = C.MDB_INTEGERDUP // with DUPSORT, dups are numeric in native byte order
-	REVERSEDUP = C.MDB_REVERSEDUP // with DUPSORT, use reverse string dups
-	CREATE     = C.MDB_CREATE     // create DB if not already existing
-)
-
-// put flags
-const (
-	NODUPDATA   = C.MDB_NODUPDATA
-	NOOVERWRITE = C.MDB_NOOVERWRITE
-	RESERVE     = C.MDB_RESERVE
-	APPEND      = C.MDB_APPEND
-	APPENDDUP   = C.MDB_APPENDDUP
-)
-
-// Txn is an opaque structure for a transaction handle.
-// All database operations require a transaction handle.
-// Transactions may be read-only or read-write.
type Txn struct {
-	_txn *C.MDB_txn
-}
-
-func (env *Env) BeginTxn(parent *Txn, flags uint) (*Txn, error) {
-	var _txn *C.MDB_txn
-	var ptxn *C.MDB_txn
-	if parent == nil {
-		ptxn = nil
-	} else {
-		ptxn = parent._txn
-	}
-	if flags&RDONLY == 0 {
-		runtime.LockOSThread()
-	}
-	ret := C.mdb_txn_begin(env._env, ptxn, C.uint(flags), &_txn)
-	if ret != SUCCESS {
-		runtime.UnlockOSThread()
-		return nil, errno(ret)
-	}
-	return &Txn{_txn}, nil
-}
-
-func (txn *Txn) Commit() error {
-	ret := C.mdb_txn_commit(txn._txn)
-	runtime.UnlockOSThread()
-	// The transaction handle is freed if there was no error
-	if ret == C.MDB_SUCCESS {
-		txn._txn = nil
-	}
-	return errno(ret)
-}
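Note the LockOSThread/UnlockOSThread pairing above: LMDB ties a write transaction to the OS thread that created it, so BeginTxn pins the goroutine and Commit/Abort release it. A hedged round-trip, under the same assumptions as the earlier sketches:

// Write path: the goroutine stays pinned to one OS thread between
// BeginTxn and Commit/Abort, matching LMDB's write-txn threading rule.
func putOne(env *mdb.Env, dbi mdb.DBI, key, val []byte) error {
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err = txn.Put(dbi, key, val, 0); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}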
-
-func (txn *Txn) Abort() {
-	if txn._txn == nil {
-		return
-	}
-	C.mdb_txn_abort(txn._txn)
-	runtime.UnlockOSThread()
-	// The transaction handle is always freed.
-	txn._txn = nil
-}
-
-func (txn *Txn) Reset() {
-	C.mdb_txn_reset(txn._txn)
-}
-
-func (txn *Txn) Renew() error {
-	ret := C.mdb_txn_renew(txn._txn)
-	return errno(ret)
-}
-
-func (txn *Txn) DBIOpen(name *string, flags uint) (DBI, error) {
-	var _dbi C.MDB_dbi
-	var cname *C.char
-	if name == nil {
-		cname = nil
-	} else {
-		cname = C.CString(*name)
-		defer C.free(unsafe.Pointer(cname))
-	}
-	ret := C.mdb_dbi_open(txn._txn, cname, C.uint(flags), &_dbi)
-	if ret != SUCCESS {
-		return DBI(math.NaN()), errno(ret)
-	}
-	return DBI(_dbi), nil
-}
-
-func (txn *Txn) Stat(dbi DBI) (*Stat, error) {
-	var _stat C.MDB_stat
-	ret := C.mdb_stat(txn._txn, C.MDB_dbi(dbi), &_stat)
-	if ret != SUCCESS {
-		return nil, errno(ret)
-	}
-	stat := Stat{PSize: uint(_stat.ms_psize),
-		Depth:         uint(_stat.ms_depth),
-		BranchPages:   uint64(_stat.ms_branch_pages),
-		LeafPages:     uint64(_stat.ms_leaf_pages),
-		OverflowPages: uint64(_stat.ms_overflow_pages),
-		Entries:       uint64(_stat.ms_entries)}
-	return &stat, nil
-}
-
-func (txn *Txn) Drop(dbi DBI, del int) error {
-	ret := C.mdb_drop(txn._txn, C.MDB_dbi(dbi), C.int(del))
-	return errno(ret)
-}
-
-func (txn *Txn) Get(dbi DBI, key []byte) ([]byte, error) {
-	val, err := txn.GetVal(dbi, key)
-	if err != nil {
-		return nil, err
-	}
-	return val.Bytes(), nil
-}
-
-func (txn *Txn) GetVal(dbi DBI, key []byte) (Val, error) {
-	ckey := Wrap(key)
-	var cval Val
-	ret := C.mdb_get(txn._txn, C.MDB_dbi(dbi), (*C.MDB_val)(&ckey), (*C.MDB_val)(&cval))
-	return cval, errno(ret)
-}
-
-func (txn *Txn) Put(dbi DBI, key []byte, val []byte, flags uint) error {
-	ckey := Wrap(key)
-	cval := Wrap(val)
-	ret := C.mdb_put(txn._txn, C.MDB_dbi(dbi), (*C.MDB_val)(&ckey), (*C.MDB_val)(&cval), C.uint(flags))
-	return errno(ret)
-}
-
-func (txn *Txn) Del(dbi DBI, key, val []byte) error {
-	ckey := Wrap(key)
-	if val == nil {
-		ret := C.mdb_del(txn._txn, C.MDB_dbi(dbi), (*C.MDB_val)(&ckey), nil)
-		return errno(ret)
-	}
-	cval := Wrap(val)
-	ret := C.mdb_del(txn._txn, C.MDB_dbi(dbi), (*C.MDB_val)(&ckey), (*C.MDB_val)(&cval))
-	return errno(ret)
-}
-
-type Cursor struct {
-	_cursor *C.MDB_cursor
-}
-
-func (txn *Txn) CursorOpen(dbi DBI) (*Cursor, error) {
-	var _cursor *C.MDB_cursor
-	ret := C.mdb_cursor_open(txn._txn, C.MDB_dbi(dbi), &_cursor)
-	if ret != SUCCESS {
-		return nil, errno(ret)
-	}
-	return &Cursor{_cursor}, nil
-}
-
-func (txn *Txn) CursorRenew(cursor *Cursor) error {
-	ret := C.mdb_cursor_renew(txn._txn, cursor._cursor)
-	return errno(ret)
-}
-
-/*
-type CmpFunc func(a, b []byte) int
-
-func (txn *Txn) SetCompare(dbi DBI, cmp CmpFunc) error {
-	f := func(a, b *C.MDB_val) C.int {
-		ga := C.GoBytes(a.mv_data, C.int(a.mv_size))
-		gb := C.GoBytes(b.mv_data, C.int(b.mv_size))
-		return C.int(cmp(ga, gb))
-	}
-	ret := C.mdb_set_compare(txn._txn, C.MDB_dbi(dbi), *unsafe.Pointer(&f))
-	return errno(ret)
-}
-*/
-// func (txn *Txn) SetDupSort(dbi DBI, comp *C.MDB_comp_func) error
-// func (txn *Txn) SetRelFunc(dbi DBI, rel *C.MDB_rel_func) error
-// func (txn *Txn) SetRelCtx(dbi DBI, void *) error
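The read path is the mirror image of the write path above: RDONLY transactions skip the OS-thread pinning, and Get copies the value out of the map via Val.Bytes (val.go, next hunk). A hedged sketch; RDONLY is defined in the env half of the binding, not in this hunk:

// Read path: RDONLY txns are not pinned to an OS thread.
func getOne(env *mdb.Env, dbi mdb.DBI, key []byte) ([]byte, error) {
	txn, err := env.BeginTxn(nil, mdb.RDONLY)
	if err != nil {
		return nil, err
	}
	defer txn.Abort()
	return txn.Get(dbi, key) // copied out of the mmap; safe after Abort
}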
diff --git a/vendor/gomdb/val.go b/vendor/gomdb/val.go
deleted file mode 100644
index e6bc407..0000000
--- a/vendor/gomdb/val.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// +build lmdb
-
-package mdb
-
-/*
-#cgo CFLAGS: -pthread -W -Wall -Wno-unused-parameter -Wbad-function-cast -O2 -g
-#cgo CFLAGS: -I/usr/local
-
-#include <stdlib.h>
-#include <stdio.h>
-#include "lmdb.h"
-*/
-import "C"
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// MDB_val
-type Val C.MDB_val
-
-// Create a Val that points to p's data. The Val's data must not be freed
-// manually and C references must not survive the garbage collection of p (and
-// the returned Val).
-func Wrap(p []byte) Val {
-	if len(p) == 0 {
-		return Val(C.MDB_val{})
-	}
-	return Val(C.MDB_val{
-		mv_size: C.size_t(len(p)),
-		mv_data: unsafe.Pointer(&p[0]),
-	})
-}
-
-// If val is nil, an empty slice is returned.
-func (val Val) Bytes() []byte {
-	return C.GoBytes(val.mv_data, C.int(val.mv_size))
-}
-
-// If val is nil, an empty slice is returned.
-func (val Val) BytesNoCopy() []byte {
-	hdr := reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(val.mv_data)),
-		Len:  int(val.mv_size),
-		Cap:  int(val.mv_size),
-	}
-	return *(*[]byte)(unsafe.Pointer(&hdr))
-}
-
-// If val is nil, an empty string is returned.
-func (val Val) String() string {
-	return C.GoStringN((*C.char)(val.mv_data), C.int(val.mv_size))
-}
diff --git a/vendor/gomdb/val_test.go b/vendor/gomdb/val_test.go
deleted file mode 100644
index 8e16dd5..0000000
--- a/vendor/gomdb/val_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build lmdb
-
-package mdb
-
-import (
-	"testing"
-)
-
-func TestVal(t *testing.T) {
-	orig := "hey hey"
-	val := Wrap([]byte(orig))
-
-	s := val.String()
-	if s != orig {
-		t.Errorf("String() not the same as original data: %q", s)
-	}
-
-	p := val.Bytes()
-	if string(p) != orig {
-		t.Errorf("Bytes() not the same as original data: %q", p)
-	}
-}
-
-func TestValNoCopy(t *testing.T) {
-	orig := "hey hey"
-	val := Wrap([]byte(orig))
-
-	s := val.String()
-	if s != orig {
-		t.Errorf("String() not the same as original data: %q", s)
-	}
-
-	p := val.BytesNoCopy()
-	if string(p) != orig {
-		t.Errorf("Bytes() not the same as original data: %q", p)
-	}
-}
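The practical difference between Bytes and BytesNoCopy above is lifetime: the former copies out of LMDB's memory map, the latter aliases it. A hedged sketch under the same import assumptions as earlier:

// BytesNoCopy aliases the mmap: the slice is only valid while the
// transaction (and the underlying page) is live. Bytes is always safe.
func safeAndFast(txn *mdb.Txn, dbi mdb.DBI, key []byte) ([]byte, error) {
	val, err := txn.GetVal(dbi, key)
	if err != nil {
		return nil, err
	}
	fast := val.BytesNoCopy() // zero-copy: use only before the txn ends
	_ = fast
	return val.Bytes(), nil // copied: survives txn.Abort()/Commit()
}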
diff --git a/vendor/lua/lua_test.go b/vendor/lua/lua_test.go
deleted file mode 100644
index b37bf75..0000000
--- a/vendor/lua/lua_test.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// +build lua
-
-package lua
-
-import (
-	"testing"
-	"unsafe"
-)
-
-type TestStruct struct {
-	IntField    int
-	StringField string
-	FloatField  float64
-}
-
-func TestGoStruct(t *testing.T) {
-	L := NewState()
-	L.OpenLibs()
-	defer L.Close()
-
-	ts := &TestStruct{10, "test", 2.3}
-
-	L.CheckStack(1)
-
-	L.PushGoStruct(ts)
-	L.SetGlobal("t")
-
-	L.GetGlobal("t")
-	if !L.IsGoStruct(-1) {
-		t.Fatal("Not go struct")
-	}
-
-	tsr := L.ToGoStruct(-1).(*TestStruct)
-	if tsr != ts {
-		t.Fatal("Retrieved something different from what we inserted")
-	}
-
-	L.Pop(1)
-
-	L.PushString("This is not a struct")
-	if L.ToGoStruct(-1) != nil {
-		t.Fatal("Non-GoStruct value attempted to convert into GoStruct should result in nil")
-	}
-
-	L.Pop(1)
-}
-
-func TestCheckStringSuccess(t *testing.T) {
-	L := NewState()
-	L.OpenLibs()
-	defer L.Close()
-
-	Test := func(L *State) int {
-		L.PushString("this is a test")
-		L.CheckString(-1)
-		return 0
-	}
-
-	L.Register("test", Test)
-	err := L.DoString("test()")
-	if err != nil {
-		t.Fatalf("DoString did return an error: %v\n", err.Error())
-	}
-}
-
-func TestCheckStringFail(t *testing.T) {
-	L := NewState()
-	L.OpenLibs()
-	defer L.Close()
-
-	Test := func(L *State) int {
-		L.CheckString(-1)
-		return 0
-	}
-
-	L.Register("test", Test)
-	err := L.DoString("test();")
-	if err == nil {
-		t.Fatal("DoString did not return an error\n")
-	}
-}
-
-func TestPCallHidden(t *testing.T) {
-	L := NewState()
-	L.OpenLibs()
-	defer L.Close()
-
-	err := L.DoString("pcall(print, \"ciao\")")
-	if err == nil {
-		t.Fatal("Can use pcall\n")
-	}
-
-	err = L.DoString("unsafe_pcall(print, \"ciao\")")
-	if err != nil {
-		t.Fatal("Can not use unsafe_pcall\n")
-	}
-}
-
-func TestCall(t *testing.T) {
-	L := NewState()
-	L.OpenLibs()
-	defer L.Close()
-
-	test := func(L *State) int {
-		arg1 := L.ToString(1)
-		arg2 := L.ToString(2)
-		arg3 := L.ToString(3)
-
-		if arg1 != "Argument1" {
-			t.Fatal("Got wrong argument (1)")
-		}
-
-		if arg2 != "Argument2" {
-			t.Fatal("Got wrong argument (2)")
-		}
-
-		if arg3 != "Argument3" {
-			t.Fatal("Got wrong argument (3)")
-		}
-
-		L.PushString("Return1")
-		L.PushString("Return2")
-
-		return 2
-	}
-
-	L.Register("test", test)
-
-	L.PushString("Dummy")
-	L.GetGlobal("test")
-	L.PushString("Argument1")
-	L.PushString("Argument2")
-	L.PushString("Argument3")
-	err := L.Call(3, 2)
-
-	if err != nil {
-		t.Fatalf("Error executing call: %v\n", err)
-	}
-
-	dummy := L.ToString(1)
-	ret1 := L.ToString(2)
-	ret2 := L.ToString(3)
-
-	if dummy != "Dummy" {
-		t.Fatal("The stack was disturbed")
-	}
-
-	if ret1 != "Return1" {
-		t.Fatalf("Wrong return value (1) got: <%s>", ret1)
-	}
-
-	if ret2 != "Return2" {
-		t.Fatalf("Wrong return value (2) got: <%s>", ret2)
-	}
-}
-
-// equivalent to basic.go
-func TestLikeBasic(t *testing.T) {
-	L := NewState()
-	defer L.Close()
-	L.OpenLibs()
-
-	testCalled := 0
-
-	test := func(L *State) int {
-		testCalled++
-		return 0
-	}
-
-	test2Arg := -1
-	test2Argfrombottom := -1
-	test2 := func(L *State) int {
-		test2Arg = L.CheckInteger(-1)
-		test2Argfrombottom = L.CheckInteger(1)
-		return 0
-	}
-
-	L.GetField(LUA_GLOBALSINDEX, "print")
-	L.PushString("Hello World!")
-	if err := L.Call(1, 0); err != nil {
-		t.Fatalf("Call to print returned error")
-	}
-
-	L.PushGoFunction(test)
-	L.PushGoFunction(test)
-	L.PushGoFunction(test)
-	L.PushGoFunction(test2)
-	L.PushInteger(42)
-	if err := L.Call(1, 0); err != nil {
-		t.Fatalf("Call to print returned error")
-	}
-	if (test2Arg != 42) || (test2Argfrombottom != 42) {
-		t.Fatalf("Call to test2 didn't work")
-	}
-
-	if err := L.Call(0, 0); err != nil {
-		t.Fatalf("Call to print returned error")
-	}
-	if err := L.Call(0, 0); err != nil {
-		t.Fatalf("Call to print returned error")
-	}
-	if err := L.Call(0, 0); err != nil {
-		t.Fatalf("Call to print returned error")
-	}
-	if testCalled != 3 {
-		t.Fatalf("Test function not called the correct number of times: %d\n", testCalled)
-	}
-
-	// this will fail as we didn't register test2 function
-	if err := L.DoString("test2(42)"); err == nil {
-		t.Fatal("No error when calling unregistered function")
-	}
-}
-
-// equivalent to quickstart.go
-func TestLikeQuickstart(t *testing.T) {
-	adder := func(L *State) int {
-		a := L.ToInteger(1)
-		b := L.ToInteger(2)
-		L.PushInteger(int64(a + b))
-		return 1
-	}
-
-	L := NewState()
-	defer L.Close()
-	L.OpenLibs()
-
-	L.Register("adder", adder)
-
-	if err := L.DoString("return adder(2, 2)"); err != nil {
-		t.Fatalf("Error during call to adder: %v\n", err)
-	}
-	if r := L.ToInteger(1); r != 4 {
-		t.Fatalf("Wrong return value from adder (was: %d)\n", r)
-	}
-}
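TestLikeQuickstart above is close to the minimal embedding pattern for this binding. Distilled into a helper (hedged: written as if inside the removed lua package, so NewState and State are unqualified, exactly as the tests use them):

// Registers a Go function under a Lua name and calls it from a chunk.
func runAdder() (int, error) {
	L := NewState()
	defer L.Close()
	L.OpenLibs()
	L.Register("adder", func(L *State) int {
		L.PushInteger(int64(L.ToInteger(1) + L.ToInteger(2)))
		return 1
	})
	if err := L.DoString("return adder(40, 2)"); err != nil {
		return 0, err
	}
	return L.ToInteger(1), nil // the single return value sits on the stack
}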
-
-// equivalent to userdata.go
-func TestLikeUserdata(t *testing.T) {
-	type Userdata struct {
-		a, b int
-	}
-
-	userDataProper := func(L *State) {
-		rawptr := L.NewUserdata(uintptr(unsafe.Sizeof(Userdata{})))
-		var ptr *Userdata
-		ptr = (*Userdata)(rawptr)
-		ptr.a = 2
-		ptr.b = 3
-
-		rawptr2 := L.ToUserdata(-1)
-		ptr2 := (*Userdata)(rawptr2)
-
-		if ptr != ptr2 {
-			t.Fatalf("Failed to create userdata\n")
-		}
-	}
-
-	testCalled := 0
-	test := func(L *State) int {
-		testCalled++
-		return 0
-	}
-
-	goDefinedFunctions := func(L *State) {
-		// example_function is registered inside Lua VM
-		L.Register("test", test)
-
-		// This code demonstrates checking that a value on the stack is a go function
-		L.CheckStack(1)
-		L.GetGlobal("test")
-		if !L.IsGoFunction(-1) {
-			t.Fatalf("IsGoFunction failed to recognize a Go function object")
-		}
-		L.Pop(1)
-
-		// We call example_function from inside Lua VM
-		testCalled = 0
-		if err := L.DoString("test()"); err != nil {
-			t.Fatalf("Error executing test function: %v\n", err)
-		}
-		if testCalled != 1 {
-			t.Fatalf("It appears the test function wasn't actually called\n")
-		}
-	}
-
-	type TestObject struct {
-		AField int
-	}
-
-	goDefinedObjects := func(L *State) {
-		z := &TestObject{42}
-
-		L.PushGoStruct(z)
-		L.SetGlobal("z")
-
-		// This code demonstrates checking that a value on the stack is a go object
-		L.CheckStack(1)
-		L.GetGlobal("z")
-		if !L.IsGoStruct(-1) {
-			t.Fatal("IsGoStruct failed to recognize a Go struct\n")
-		}
-		L.Pop(1)
-
-		// This code demonstrates access and assignment to a field of a go object
-		if err := L.DoString("return z.AField"); err != nil {
-			t.Fatal("Couldn't execute code")
-		}
-		before := L.ToInteger(-1)
-		L.Pop(1)
-		if before != 42 {
-			t.Fatalf("Wrong value of z.AField before change (%d)\n", before)
-		}
-		if err := L.DoString("z.AField = 10;"); err != nil {
-			t.Fatal("Couldn't execute code")
-		}
-		if err := L.DoString("return z.AField"); err != nil {
-			t.Fatal("Couldn't execute code")
-		}
-		after := L.ToInteger(-1)
-		L.Pop(1)
-		if after != 10 {
-			t.Fatalf("Wrong value of z.AField after change (%d)\n", after)
-		}
-	}
-
-	L := NewState()
-	defer L.Close()
-	L.OpenLibs()
-
-	userDataProper(L)
-	goDefinedFunctions(L)
-	goDefinedObjects(L)
-}
-
-func TestStackTrace(t *testing.T) {
-	L := NewState()
-	defer L.Close()
-	L.OpenLibs()
-
-	err := L.DoFile("../example/calls.lua")
-	if err == nil {
-		t.Fatal("No error returned from the execution of calls.lua")
-	}
-
-	le := err.(*LuaError)
-
-	if le.Code() != LUA_ERRERR {
-		t.Fatalf("Wrong kind of error encountered running calls.lua: %v (%d %d)\n", le, le.Code(), LUA_ERRERR)
-	}
-
-	if len(le.StackTrace()) != 6 {
-		t.Fatalf("Wrong size of stack trace (%v)\n", le.StackTrace())
-	}
-}
-
-func TestConv(t *testing.T) {
-	L := NewState()
-	defer L.Close()
-	L.OpenLibs()
-
-	L.PushString("10")
-	n := L.ToNumber(-1)
-	if n != 10 {
-		t.Fatalf("Wrong conversion (str -> int)")
-	}
-	if L.Type(-1) != LUA_TSTRING {
-		t.Fatalf("Wrong type (str)")
-	}
-
-	L.Pop(1)
-
-	L.PushInteger(10)
-	s := L.ToString(-1)
-	if s != "10" {
-		t.Fatalf("Wrong conversion (int -> str)")
-	}
-
-	L.Pop(1)
-
-	L.PushString("a\000test")
-	s = L.ToString(-1)
-	if s != "a\000test" {
-		t.Fatalf("Wrong conversion (str -> str): <%s>", s)
-	}
-}
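As TestStackTrace above shows, script failures surface as *LuaError with an error code and a captured stack. A hedged handling sketch, again written as if inside the removed lua package and assuming "fmt" is imported:

// Inspects a failed chunk; Code() and StackTrace() are the accessors
// exercised by TestStackTrace above.
func report(L *State, chunk string) {
	if err := L.DoString(chunk); err != nil {
		if le, ok := err.(*LuaError); ok {
			fmt.Printf("lua error %d: %v (stack depth %d)\n",
				le.Code(), le, len(le.StackTrace()))
		}
	}
}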