xahaud/db/autocompact_test.cc
Vinnie Falco 596a35acca Squashed 'src/hyperleveldb/' changes from ac2ae30..25511b7
25511b7 Merge branch 'master' of github.com:rescrv/HyperLevelDB into hyperdb
ed01020 Make "source" universal
3784d92 Ignore the static file
507319b Don't package with snappy
3e2cc8b Tolerate -fno-rtti
4dcdd6e Drop revision down to 1.0.dev
2542163 Drop all but the latest kept for garbage reasons
9c270b7 Update .gitignore
5331878 Add upack script
adc2a7a Explicitly add -lpthread for Ubuntu
7b57bbd Strip NULL chars passed to LiveBackup
e3b87e7 Add write-buffer-size option to benchmark
2f11087 Followup to snappy support with -DSNAPPY
af503da Improve efficiency of ReplayIterator; fix a bug
33c1f0c Add snappy support
ce1cacf Fix a race in ReplayIterator
5c4679b Fix a bug in the replay_iterator
ca332bd Fix sort algorithm used for compaction boundaries.
d9ec544 make checK
b83a9cd Fix a deadlock in the ReplayIterator dtor
273547b Fix a double-delete in ReplayIterator
3377c7a Add "all" to set of special timestamps
387f43a Timestamp comparison and validation.
f9a6eb1 make distcheck
9a4d0b7 Add a ReplayIterator.
1d53869 Conditionally enable read-driven compaction.
f6fa561 16% end-to-end performance improvement from the skiplist
28ffd32 Merge remote-tracking branch 'upstream/master'
a58de73 Revert "Remove read-driven compactions."
e19fc0c Fix upstream issue 200
748539c LevelDB 1.13
78b7812 Add install instructions to README
e47a48e Make benchmark dir variable
820a096 Update distributed files.
486ca7f Live backup of LevelDB instances
6579884 Put a reference counter on log_/logfile_
3075253 Update internal benchmark.
2a6b0bd Make the Version a parameter of PickCompaction
5bd76dc Release leveldb 1.12

git-subtree-dir: src/hyperleveldb
git-subtree-split: 25511b7a9101b0bafb57349d2194ba80ccbf7bc3
2013-11-19 11:32:55 -08:00

124 lines · 3.5 KiB · C++

// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "hyperleveldb/db.h"
#include "db/db_impl.h"
#include "hyperleveldb/cache.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace leveldb {

class AutoCompactTest {
 public:
  std::string dbname_;
  Cache* tiny_cache_;
  Options options_;
  DB* db_;

  AutoCompactTest() {
    dbname_ = test::TmpDir() + "/autocompact_test";
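    // A 100-byte block cache: far too small to keep any data block resident.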
    tiny_cache_ = NewLRUCache(100);
    options_.block_cache = tiny_cache_;
    DestroyDB(dbname_, options_);
    options_.create_if_missing = true;
    options_.compression = kNoCompression;
    ASSERT_OK(DB::Open(options_, dbname_, &db_));
  }

  ~AutoCompactTest() {
    delete db_;
    DestroyDB(dbname_, Options());
    delete tiny_cache_;
  }

  std::string Key(int i) {
    char buf[100];
    snprintf(buf, sizeof(buf), "key%06d", i);
    return std::string(buf);
  }

  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  void DoReads(int n);
};
static const int kValueSize = 200 * 1024;
static const int kTotalSize = 100 * 1024 * 1024;
static const int kCount = kTotalSize / kValueSize;

// Read through the first n keys repeatedly and check that they get
// compacted (verified by checking the size of the key space).
void AutoCompactTest::DoReads(int n) {
  std::string value(kValueSize, 'x');
  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);

  // Fill database
  for (int i = 0; i < kCount; i++) {
    ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
  }
  ASSERT_OK(dbi->TEST_CompactMemTable());

  // Delete everything
  for (int i = 0; i < kCount; i++) {
    ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
  }
  ASSERT_OK(dbi->TEST_CompactMemTable());

  // Get initial measurement of the space we will be reading.
  const int64_t initial_size = Size(Key(0), Key(n));
  const int64_t initial_other_size = Size(Key(n), Key(kCount));

  // Read until size drops significantly.
  std::string limit_key = Key(n);
  for (int read = 0; true; read++) {
    ASSERT_LT(read, 100) << "Taking too long to compact";
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst();
         iter->Valid() && iter->key().ToString() < limit_key;
         iter->Next()) {
      // Drop data
    }
    delete iter;
    // Wait a little bit to allow any triggered compactions to complete.
    Env::Default()->SleepForMicroseconds(1000000);
    uint64_t size = Size(Key(0), Key(n));
    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
            read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
    if (size <= initial_size/10) {
      break;
    }
  }

  // Verify that the size of the key space not touched by the reads
  // is pretty much unchanged.
  const int64_t final_other_size = Size(Key(n), Key(kCount));
  ASSERT_LE(final_other_size, initial_other_size + 1048576);
  ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
}

TEST(AutoCompactTest, ReadAll) {
  DoReads(kCount);
}

// HyperLevelDB's ratio-driven compactions always compact everything here. The
// reads trigger the compaction, but then the system decides it is more
// efficient to just collect everything, emptying the db completely.
#if 0
TEST(AutoCompactTest, ReadHalf) {
  DoReads(kCount/2);
}
#endif

}  // namespace leveldb

int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}