// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/memtable.h"

#include <algorithm>
#include <limits>
#include <memory>

#include "db/dbformat.h"
#include "db/merge_context.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice_transform.h"
#include "table/merger.h"
#include "util/arena.h"
#include "util/coding.h"
#include "util/murmurhash.h"
#include "util/mutexlock.h"
#include "util/perf_context_imp.h"
#include "util/statistics.h"
#include "util/stop_watch.h"

namespace rocksdb {

MemTableOptions::MemTableOptions(
    const MutableCFOptions& mutable_cf_options, const Options& options)
    : write_buffer_size(mutable_cf_options.write_buffer_size),
      arena_block_size(mutable_cf_options.arena_block_size),
      memtable_prefix_bloom_bits(mutable_cf_options.memtable_prefix_bloom_bits),
      memtable_prefix_bloom_probes(
          mutable_cf_options.memtable_prefix_bloom_probes),
      memtable_prefix_bloom_huge_page_tlb_size(
          mutable_cf_options.memtable_prefix_bloom_huge_page_tlb_size),
      inplace_update_support(options.inplace_update_support),
      inplace_update_num_locks(options.inplace_update_num_locks),
      inplace_callback(options.inplace_callback),
      max_successive_merges(mutable_cf_options.max_successive_merges),
      filter_deletes(mutable_cf_options.filter_deletes) {}

MemTable::MemTable(const InternalKeyComparator& cmp,
                   const ImmutableCFOptions& ioptions,
                   const MemTableOptions& moptions)
    : comparator_(cmp),
      ioptions_(ioptions),
      moptions_(moptions),
      refs_(0),
      kArenaBlockSize(OptimizeBlockSize(moptions.arena_block_size)),
      arena_(moptions.arena_block_size),
      table_(ioptions.memtable_factory->CreateMemTableRep(
          comparator_, &arena_, ioptions.prefix_extractor, ioptions.info_log)),
      num_entries_(0),
      flush_in_progress_(false),
      flush_completed_(false),
      file_number_(0),
      first_seqno_(0),
      mem_next_logfile_number_(0),
      locks_(moptions.inplace_update_support ? moptions.inplace_update_num_locks
                                             : 0),
      prefix_extractor_(ioptions.prefix_extractor),
      should_flush_(ShouldFlushNow()),
      flush_scheduled_(false) {
  // if should_flush_ == true without an entry inserted, something must have
  // gone wrong already.
  assert(!should_flush_);
  if (prefix_extractor_ && moptions.memtable_prefix_bloom_bits > 0) {
    prefix_bloom_.reset(new DynamicBloom(
        &arena_, moptions.memtable_prefix_bloom_bits, ioptions.bloom_locality,
        moptions.memtable_prefix_bloom_probes, nullptr,
        moptions.memtable_prefix_bloom_huge_page_tlb_size,
        ioptions.info_log));
  }
}

MemTable::~MemTable() { assert(refs_ == 0); }

size_t MemTable::ApproximateMemoryUsage() {
  size_t arena_usage = arena_.ApproximateMemoryUsage();
  size_t table_usage = table_->ApproximateMemoryUsage();
  // let MAX_USAGE = std::numeric_limits<size_t>::max()
  // then if arena_usage + table_usage >= MAX_USAGE, return MAX_USAGE.
  // the following variation is to avoid numeric overflow.
  if (arena_usage >= std::numeric_limits<size_t>::max() - table_usage) {
    return std::numeric_limits<size_t>::max();
  }
  // otherwise, return the actual usage
  return arena_usage + table_usage;
}

bool MemTable::ShouldFlushNow() const {
  // Often we cannot allocate arena blocks that exactly match the buffer size.
  // Thus we have to decide if we should over-allocate or under-allocate.
  // This constant variable can be interpreted as: if we still have more than
  // "kAllowOverAllocationRatio * kArenaBlockSize" space left, we'd try to
  // over-allocate one more block.
  const double kAllowOverAllocationRatio = 0.6;
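
  // Illustrative example with assumed (not default) sizes: with a 4 MB
  // write_buffer_size and a 512 KB kArenaBlockSize, the first check below
  // declines to flush while allocated_memory + 512 KB < 4 MB + 0.6 * 512 KB,
  // i.e. while allocated_memory stays under roughly 3.8 MB, and the second
  // check forces a flush once allocated_memory exceeds roughly 4.3 MB.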

  // If the arena still has room for a new block allocation, we can safely say
  // it shouldn't flush.
  auto allocated_memory =
      table_->ApproximateMemoryUsage() + arena_.MemoryAllocatedBytes();

  // if we can still allocate one more block without exceeding the
  // over-allocation ratio, then we should not flush.
  if (allocated_memory + kArenaBlockSize <
      moptions_.write_buffer_size +
      kArenaBlockSize * kAllowOverAllocationRatio) {
    return false;
  }

  // if the user keeps adding entries so that allocated memory exceeds
  // moptions.write_buffer_size, we need to flush earlier even though we
  // still have much available memory left.
  if (allocated_memory > moptions_.write_buffer_size +
      kArenaBlockSize * kAllowOverAllocationRatio) {
    return true;
  }

  // In this code path, the arena has already allocated its "last block",
  // which means the total allocated memory size is either:
  //  (1) "moderately" over-allocated (by no more than `0.6 * arena block
  //  size`). Or,
  //  (2) less than the write buffer size, but we'll stop here, since if we
  //  allocate a new arena block, we'll over-allocate by too much more (half
  //  of the arena block size) memory.
  //
  // In either case, to avoid over-allocating, the last block will stop
  // allocation when its usage reaches a certain ratio, which we carefully
  // choose "0.75 full" as the stop condition because it addresses the
  // following issue with great simplicity: What if the next inserted entry's
  // size is bigger than AllocatedAndUnused()?
  //
  // The answer is: if the entry size is also bigger than 0.25 *
  // kArenaBlockSize, a dedicated block will be allocated for it; otherwise
  // arena will anyway skip the AllocatedAndUnused() and allocate a new, empty
  // and regular block. In either case, we *overly* over-allocated.
  //
  // Therefore, setting the last block to be at most "0.75 full" avoids both
  // cases.
  //
  // NOTE: the average proportion of wasted space with this approach can be
  // estimated as "arena block size * 0.25 / write buffer size". Users who
  // specify a small write buffer size and/or a big arena block size may
  // suffer.
  return arena_.AllocatedAndUnused() < kArenaBlockSize / 4;
}

int MemTable::KeyComparator::operator()(const char* prefix_len_key1,
                                        const char* prefix_len_key2) const {
  // Internal keys are encoded as length-prefixed strings.
  Slice k1 = GetLengthPrefixedSlice(prefix_len_key1);
  Slice k2 = GetLengthPrefixedSlice(prefix_len_key2);
  return comparator.Compare(k1, k2);
}

int MemTable::KeyComparator::operator()(const char* prefix_len_key,
                                        const Slice& key) const {
  // Internal keys are encoded as length-prefixed strings.
  Slice a = GetLengthPrefixedSlice(prefix_len_key);
  return comparator.Compare(a, key);
}

Slice MemTableRep::UserKey(const char* key) const {
  // Strip the 8-byte tag (sequence number + value type) off the internal key.
  Slice slice = GetLengthPrefixedSlice(key);
  return Slice(slice.data(), slice.size() - 8);
}

KeyHandle MemTableRep::Allocate(const size_t len, char** buf) {
  *buf = arena_->Allocate(len);
  return static_cast<KeyHandle>(*buf);
}

// Encode a suitable internal key target for "target" and return it.
// Uses *scratch as scratch space, and the returned pointer will point
// into this scratch space.
const char* EncodeKey(std::string* scratch, const Slice& target) {
  scratch->clear();
  PutVarint32(scratch, target.size());
  scratch->append(target.data(), target.size());
  return scratch->data();
}
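
// For example (illustrative only): EncodeKey(&scratch, Slice("foo")) leaves
// scratch holding the four bytes { 0x03, 'f', 'o', 'o' }, a varint32 length
// prefix followed by the key bytes, and returns scratch->data().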

class MemTableIterator: public Iterator {
 public:
  MemTableIterator(
      const MemTable& mem, const ReadOptions& read_options, Arena* arena)
      : bloom_(nullptr),
        prefix_extractor_(mem.prefix_extractor_),
        valid_(false),
        arena_mode_(arena != nullptr) {
    if (prefix_extractor_ != nullptr && !read_options.total_order_seek) {
      bloom_ = mem.prefix_bloom_.get();
      iter_ = mem.table_->GetDynamicPrefixIterator(arena);
    } else {
      iter_ = mem.table_->GetIterator(arena);
    }
  }

  ~MemTableIterator() {
    if (arena_mode_) {
      iter_->~Iterator();
    } else {
      delete iter_;
    }
  }

  virtual bool Valid() const { return valid_; }
  virtual void Seek(const Slice& k) {
    if (bloom_ != nullptr &&
        !bloom_->MayContain(prefix_extractor_->Transform(ExtractUserKey(k)))) {
      valid_ = false;
      return;
    }
    iter_->Seek(k, nullptr);
    valid_ = iter_->Valid();
  }
  virtual void SeekToFirst() {
    iter_->SeekToFirst();
    valid_ = iter_->Valid();
  }
  virtual void SeekToLast() {
    iter_->SeekToLast();
    valid_ = iter_->Valid();
  }
  virtual void Next() {
    assert(Valid());
    iter_->Next();
    valid_ = iter_->Valid();
  }
  virtual void Prev() {
    assert(Valid());
    iter_->Prev();
    valid_ = iter_->Valid();
  }
  virtual Slice key() const {
    assert(Valid());
    return GetLengthPrefixedSlice(iter_->key());
  }
  virtual Slice value() const {
    assert(Valid());
    Slice key_slice = GetLengthPrefixedSlice(iter_->key());
    return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
  }

  virtual Status status() const { return Status::OK(); }

 private:
  DynamicBloom* bloom_;
  const SliceTransform* const prefix_extractor_;
  MemTableRep::Iterator* iter_;
  bool valid_;
  bool arena_mode_;

  // No copying allowed
  MemTableIterator(const MemTableIterator&);
  void operator=(const MemTableIterator&);
};
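
// Note: when constructed through MemTable::NewIterator() below, the iterator
// is placement-new'd into an arena (arena_mode_ == true), so it is destroyed
// by explicitly invoking ~Iterator() and its memory is reclaimed with the
// arena rather than via delete.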

Iterator* MemTable::NewIterator(const ReadOptions& read_options, Arena* arena) {
  assert(arena != nullptr);
  auto mem = arena->AllocateAligned(sizeof(MemTableIterator));
  return new (mem) MemTableIterator(*this, read_options, arena);
}
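
// Striped locking for in-place updates: each user key hashes onto one of
// inplace_update_num_locks pre-allocated RWMutexes (see the constructor), so
// the same key always maps to the same lock while unrelated keys rarely
// contend.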

port::RWMutex* MemTable::GetLock(const Slice& key) {
  static murmur_hash hash;
  return &locks_[hash(key) % locks_.size()];
}

void MemTable::Add(SequenceNumber s, ValueType type,
                   const Slice& key, /* user key */
                   const Slice& value) {
  // Format of an entry is concatenation of:
  //  key_size     : varint32 of internal_key.size()
  //  key bytes    : char[internal_key.size()]
  //  value_size   : varint32 of value.size()
  //  value bytes  : char[value.size()]
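  //
  // Illustrative layout (assuming kTypeValue == 0x1): user key "foo",
  // sequence number 5, value "bar" encodes as:
  //  key_size     = 0x0b                     (3 user-key bytes + 8 tag bytes)
  //  key bytes    = 'f' 'o' 'o'
  //  tag          = 01 05 00 00 00 00 00 00  (little-endian (5 << 8) | type)
  //  value_size   = 0x03
  //  value bytes  = 'b' 'a' 'r'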
  size_t key_size = key.size();
  size_t val_size = value.size();
  size_t internal_key_size = key_size + 8;
  const size_t encoded_len = VarintLength(internal_key_size) +
                             internal_key_size + VarintLength(val_size) +
                             val_size;
  char* buf = nullptr;
  KeyHandle handle = table_->Allocate(encoded_len, &buf);
  assert(buf != nullptr);
  char* p = EncodeVarint32(buf, internal_key_size);
  memcpy(p, key.data(), key_size);
  p += key_size;
  EncodeFixed64(p, (s << 8) | type);
  p += 8;
  p = EncodeVarint32(p, val_size);
  memcpy(p, value.data(), val_size);
  assert((unsigned)(p + val_size - buf) == (unsigned)encoded_len);
  table_->Insert(handle);
  num_entries_++;

  if (prefix_bloom_) {
    assert(prefix_extractor_);
    prefix_bloom_->Add(prefix_extractor_->Transform(key));
  }

  // The first sequence number inserted into the memtable
  assert(first_seqno_ == 0 || s > first_seqno_);
  if (first_seqno_ == 0) {
    first_seqno_ = s;
  }

  should_flush_ = ShouldFlushNow();
}

// Callback from MemTable::Get()
namespace {

struct Saver {
  Status* status;
  const LookupKey* key;
  bool* found_final_value;  // Is value set correctly? Used by KeyMayExist
  bool* merge_in_progress;
  std::string* value;
  const MergeOperator* merge_operator;
  // the merge operations encountered
  MergeContext* merge_context;
  MemTable* mem;
  Logger* logger;
  Statistics* statistics;
  bool inplace_update_support;
};
}  // namespace
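
// SaveValue is invoked by MemTableRep::Get() (at the bottom of this file) for
// each entry found for the lookup key, in order. Returning true asks the rep
// to keep scanning (used to accumulate merge operands); returning false stops
// the scan because a final value, a deletion, or an error was found.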

static bool SaveValue(void* arg, const char* entry) {
  Saver* s = reinterpret_cast<Saver*>(arg);
  assert(s != nullptr);
  MergeContext* merge_context = s->merge_context;
  const MergeOperator* merge_operator = s->merge_operator;
  assert(merge_context != nullptr);

  // entry format is:
  //    klength  varint32
  //    userkey  char[klength-8]
  //    tag      uint64
  //    vlength  varint32
  //    value    char[vlength]
  // Check that it belongs to same user key.  We do not check the
  // sequence number since the Seek() call above should have skipped
  // all entries with overly large sequence numbers.
  uint32_t key_length;
  const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
  if (s->mem->GetInternalKeyComparator().user_comparator()->Compare(
          Slice(key_ptr, key_length - 8), s->key->user_key()) == 0) {
    // Correct user key
    const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
    switch (static_cast<ValueType>(tag & 0xff)) {
      case kTypeValue: {
        if (s->inplace_update_support) {
          s->mem->GetLock(s->key->user_key())->ReadLock();
        }
        Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
        *(s->status) = Status::OK();
        if (*(s->merge_in_progress)) {
          assert(merge_operator);
          if (!merge_operator->FullMerge(s->key->user_key(), &v,
                                         merge_context->GetOperands(),
                                         s->value, s->logger)) {
            RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
            *(s->status) =
                Status::Corruption("Error: Could not perform merge.");
          }
        } else {
          s->value->assign(v.data(), v.size());
        }
        if (s->inplace_update_support) {
          s->mem->GetLock(s->key->user_key())->ReadUnlock();
        }
        *(s->found_final_value) = true;
        return false;
      }
      case kTypeDeletion: {
        if (*(s->merge_in_progress)) {
          assert(merge_operator);
          *(s->status) = Status::OK();
          if (!merge_operator->FullMerge(s->key->user_key(), nullptr,
                                         merge_context->GetOperands(),
                                         s->value, s->logger)) {
            RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
            *(s->status) =
                Status::Corruption("Error: Could not perform merge.");
          }
        } else {
          *(s->status) = Status::NotFound();
        }
        *(s->found_final_value) = true;
        return false;
      }
      case kTypeMerge: {
        if (!merge_operator) {
          *(s->status) = Status::InvalidArgument(
              "merge_operator is not properly initialized.");
          // Normally we continue the loop (return true) when we see a merge
          // operand.  But in case of an error, we should stop the loop
          // immediately and pretend we have found the value to stop further
          // seek.  Otherwise, the later call will override this error status.
          *(s->found_final_value) = true;
          return false;
        }
        Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
        *(s->merge_in_progress) = true;
        merge_context->PushOperand(v);
        return true;
      }
      default:
        assert(false);
        return true;
    }
  }

  // s->status could be Corruption, MergeInProgress or NotFound at this point.
  return false;
}

bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
                   MergeContext* merge_context) {
  // The sequence number is updated synchronously in version_set.h
  if (IsEmpty()) {
    // Avoid recording stats, for speed.
    return false;
  }
  PERF_TIMER_GUARD(get_from_memtable_time);

  Slice user_key = key.user_key();
  bool found_final_value = false;
  bool merge_in_progress = s->IsMergeInProgress();

  if (prefix_bloom_ &&
      !prefix_bloom_->MayContain(prefix_extractor_->Transform(user_key))) {
    // the prefix bloom filter says the key does not exist; skip the lookup
  } else {
    Saver saver;
    saver.status = s;
    saver.found_final_value = &found_final_value;
    saver.merge_in_progress = &merge_in_progress;
    saver.key = &key;
    saver.value = value;
    saver.mem = this;
    saver.merge_context = merge_context;
    saver.merge_operator = ioptions_.merge_operator;
    saver.logger = ioptions_.info_log;
    saver.inplace_update_support = moptions_.inplace_update_support;
    saver.statistics = ioptions_.statistics;
    table_->Get(key, &saver, SaveValue);
  }

  // No change to value, since we have not yet found a Put/Delete
  if (!found_final_value && merge_in_progress) {
    *s = Status::MergeInProgress("");
  }
  PERF_COUNTER_ADD(get_from_memtable_count, 1);
  return found_final_value;
}
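
// Minimal caller's-eye sketch (hypothetical usage, not code from this file):
//   LookupKey lkey(user_key, snapshot_sequence);
//   std::string value;
//   Status s;
//   MergeContext merge_context;
//   if (!mem->Get(lkey, &value, &s, &merge_context)) {
//     // not resolved here; consult immutable memtables and SST files next
//   }
// A true return means `s` and `value` carry the final outcome: a value,
// NotFound for a deletion, or an error such as Corruption.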

void MemTable::Update(SequenceNumber seq,
                      const Slice& key,
                      const Slice& value) {
  LookupKey lkey(key, seq);
  Slice mem_key = lkey.memtable_key();

  std::unique_ptr<MemTableRep::Iterator> iter(
      table_->GetDynamicPrefixIterator());
  iter->Seek(lkey.internal_key(), mem_key.data());

  if (iter->Valid()) {
    // entry format is:
    //    key_length  varint32
    //    userkey     char[klength-8]
    //    tag         uint64
    //    vlength     varint32
    //    value       char[vlength]
    // Check that it belongs to same user key.  We do not check the
    // sequence number since the Seek() call above should have skipped
    // all entries with overly large sequence numbers.
    const char* entry = iter->key();
    uint32_t key_length = 0;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if (comparator_.comparator.user_comparator()->Compare(
            Slice(key_ptr, key_length - 8), lkey.user_key()) == 0) {
      // Correct user key
      const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
      switch (static_cast<ValueType>(tag & 0xff)) {
        case kTypeValue: {
          Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
          uint32_t prev_size = prev_value.size();
          uint32_t new_size = value.size();

          // Update value, if new value size <= previous value size
          if (new_size <= prev_size) {
            char* p = EncodeVarint32(const_cast<char*>(key_ptr) + key_length,
                                     new_size);
            WriteLock wl(GetLock(lkey.user_key()));
            memcpy(p, value.data(), value.size());
            assert((unsigned)((p + value.size()) - entry) ==
                   (unsigned)(VarintLength(key_length) + key_length +
                              VarintLength(value.size()) + value.size()));
            return;
          }
        }
        default:
          // The latest value is kTypeDeletion, kTypeMerge or kTypeLogData,
          // or (falling through from above) there wasn't enough space to
          // update in place, so add a new entry instead.
          Add(seq, kTypeValue, key, value);
          return;
      }
    }
  }

  // key doesn't exist
  Add(seq, kTypeValue, key, value);
}

bool MemTable::UpdateCallback(SequenceNumber seq,
                              const Slice& key,
                              const Slice& delta) {
  LookupKey lkey(key, seq);
  Slice memkey = lkey.memtable_key();

  std::unique_ptr<MemTableRep::Iterator> iter(
      table_->GetDynamicPrefixIterator());
  iter->Seek(lkey.internal_key(), memkey.data());

  if (iter->Valid()) {
    // entry format is:
    //    key_length  varint32
    //    userkey     char[klength-8]
    //    tag         uint64
    //    vlength     varint32
    //    value       char[vlength]
    // Check that it belongs to same user key.  We do not check the
    // sequence number since the Seek() call above should have skipped
    // all entries with overly large sequence numbers.
    const char* entry = iter->key();
    uint32_t key_length = 0;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if (comparator_.comparator.user_comparator()->Compare(
            Slice(key_ptr, key_length - 8), lkey.user_key()) == 0) {
      // Correct user key
      const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
      switch (static_cast<ValueType>(tag & 0xff)) {
        case kTypeValue: {
          Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
          uint32_t prev_size = prev_value.size();

          char* prev_buffer = const_cast<char*>(prev_value.data());
          uint32_t new_prev_size = prev_size;

          std::string str_value;
          WriteLock wl(GetLock(lkey.user_key()));
          auto status = moptions_.inplace_callback(prev_buffer, &new_prev_size,
                                                   delta, &str_value);
          if (status == UpdateStatus::UPDATED_INPLACE) {
            // Value already updated by callback.
            assert(new_prev_size <= prev_size);
            if (new_prev_size < prev_size) {
              // overwrite the new prev_size
              char* p = EncodeVarint32(const_cast<char*>(key_ptr) + key_length,
                                       new_prev_size);
              if (VarintLength(new_prev_size) < VarintLength(prev_size)) {
                // shift the value buffer as well.
                memcpy(p, prev_buffer, new_prev_size);
              }
            }
            RecordTick(ioptions_.statistics, NUMBER_KEYS_UPDATED);
            should_flush_ = ShouldFlushNow();
            return true;
          } else if (status == UpdateStatus::UPDATED) {
            Add(seq, kTypeValue, key, Slice(str_value));
            RecordTick(ioptions_.statistics, NUMBER_KEYS_WRITTEN);
            should_flush_ = ShouldFlushNow();
            return true;
          } else if (status == UpdateStatus::UPDATE_FAILED) {
            // No action required. Return.
            should_flush_ = ShouldFlushNow();
            return true;
          }
        }
        default:
          break;
      }
    }
  }
  // If the latest value is not kTypeValue
  // or the key doesn't exist
  return false;
}
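
// Illustrative inplace_callback (hypothetical; assumes the callback signature
// from Options and an 8-byte counter stored as the value):
//   UpdateStatus AddOne(char* existing, uint32_t* existing_size,
//                       Slice /*delta*/, std::string* /*merged*/) {
//     uint64_t v;
//     memcpy(&v, existing, sizeof(v));  // read the current counter
//     ++v;
//     memcpy(existing, &v, sizeof(v));  // write it back, size unchanged
//     return UpdateStatus::UPDATED_INPLACE;
//   }
// Registered via Options::inplace_callback, UpdateCallback() above would then
// apply such updates without inserting a new entry.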

size_t MemTable::CountSuccessiveMergeEntries(const LookupKey& key) {
  Slice memkey = key.memtable_key();

  // A totally ordered iterator is costly for some memtable reps
  // (prefix-aware reps). By passing in the user key, we allow efficient
  // iterator creation. The iterator only needs to be ordered within the
  // same user key.
  std::unique_ptr<MemTableRep::Iterator> iter(
      table_->GetDynamicPrefixIterator());
  iter->Seek(key.internal_key(), memkey.data());

  size_t num_successive_merges = 0;

  for (; iter->Valid(); iter->Next()) {
    const char* entry = iter->key();
    uint32_t key_length = 0;
    const char* iter_key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if (comparator_.comparator.user_comparator()->Compare(
            Slice(iter_key_ptr, key_length - 8), key.user_key()) != 0) {
      break;
    }

    const uint64_t tag = DecodeFixed64(iter_key_ptr + key_length - 8);
    if (static_cast<ValueType>(tag & 0xff) != kTypeMerge) {
      break;
    }

    ++num_successive_merges;
  }

  return num_successive_merges;
}
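
// This count is compared, by the write path outside this file, against
// MemTableOptions::max_successive_merges (see the top of this file) to decide
// when a chain of merge operands has grown long enough to be collapsed
// eagerly.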

void MemTableRep::Get(const LookupKey& k, void* callback_args,
                      bool (*callback_func)(void* arg, const char* entry)) {
  auto iter = GetDynamicPrefixIterator();
  for (iter->Seek(k.internal_key(), k.memtable_key().data());
       iter->Valid() && callback_func(callback_args, iter->key());
       iter->Next()) {
  }
}

}  // namespace rocksdb