xahaud/db/replay_iterator.cc
Vinnie Falco 596a35acca Squashed 'src/hyperleveldb/' changes from ac2ae30..25511b7
25511b7 Merge branch 'master' of github.com:rescrv/HyperLevelDB into hyperdb
ed01020 Make "source" universal
3784d92 Ignore the static file
507319b Don't package with snappy
3e2cc8b Tolerate -fno-rtti
4dcdd6e Drop revision down to 1.0.dev
2542163 Drop all but the latest kept for garbage reasons
9c270b7 Update .gitignore
5331878 Add upack script
adc2a7a Explicitly add -lpthread for Ubuntu
7b57bbd Strip NULL chars passed to LiveBackup
e3b87e7 Add write-buffer-size option to benchmark
2f11087 Followup to snappy support with -DSNAPPY
af503da Improve efficiency of ReplayIterator; fix a bug
33c1f0c Add snappy support
ce1cacf Fix a race in ReplayIterator
5c4679b Fix a bug in the replay_iterator
ca332bd Fix sort algorithm used for compaction boundaries.
d9ec544 make checK
b83a9cd Fix a deadlock in the ReplayIterator dtor
273547b Fix a double-delete in ReplayIterator
3377c7a Add "all" to set of special timestamps
387f43a Timestamp comparison and validation.
f9a6eb1 make distcheck
9a4d0b7 Add a ReplayIterator.
1d53869 Conditionally enable read-driven compaction.
f6fa561 16% end-to-end performance improvement from the skiplist
28ffd32 Merge remote-tracking branch 'upstream/master'
a58de73 Revert "Remove read-driven compactions."
e19fc0c Fix upstream issue 200
748539c LevelDB 1.13
78b7812 Add install instructions to README
e47a48e Make benchmark dir variable
820a096 Update distributed files.
486ca7f Live backup of LevelDB instances
6579884 Put a reference counter on log_/logfile_
3075253 Update internal benchmark.
2a6b0bd Make the Version a parameter of PickCompaction
5bd76dc Release leveldb 1.12

git-subtree-dir: src/hyperleveldb
git-subtree-split: 25511b7a9101b0bafb57349d2194ba80ccbf7bc3
2013-11-19 11:32:55 -08:00


// Copyright (c) 2013 The HyperLevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "replay_iterator.h"

#include "filename.h"
#include "db_impl.h"
#include "dbformat.h"
#include "../hyperleveldb/env.h"
#include "../hyperleveldb/iterator.h"
#include "../port/port.h"
#include "../util/logging.h"
#include "../util/mutexlock.h"
#include "../util/random.h"

namespace hyperleveldb {

ReplayIterator::ReplayIterator() {
}

ReplayIterator::~ReplayIterator() {
}

ReplayState::ReplayState(Iterator* i, SequenceNumber s, SequenceNumber l)
  : mem_(NULL),
    iter_(i),
    seq_start_(s),
    seq_limit_(l) {
}

ReplayState::ReplayState(MemTable* m, SequenceNumber s)
  : mem_(m),
    iter_(NULL),
    seq_start_(s),
    seq_limit_(0) {
}

ReplayIteratorImpl::ReplayIteratorImpl(DBImpl* db, port::Mutex* mutex, const Comparator* cmp,
                                       Iterator* iter, MemTable* m, SequenceNumber s)
  : ReplayIterator(),
    db_(db),
    mutex_(mutex),
    user_comparator_(cmp),
    start_at_(s),
    valid_(),
    status_(),
    has_current_user_key_(false),
    current_user_key_(),
    current_user_sequence_(),
    rs_(iter, s, kMaxSequenceNumber),
    mems_() {
  m->Ref();
  mems_.push_back(ReplayState(m, s));
}

ReplayIteratorImpl::~ReplayIteratorImpl() {
}

bool ReplayIteratorImpl::Valid() {
  Prime();
  return valid_;
}

void ReplayIteratorImpl::Next() {
  rs_.iter_->Next();
}

bool ReplayIteratorImpl::HasValue() {
  ParsedInternalKey ikey;
  return ParseKey(&ikey) && ikey.type == kTypeValue;
}

Slice ReplayIteratorImpl::key() const {
  assert(valid_);
  return ExtractUserKey(rs_.iter_->key());
}

Slice ReplayIteratorImpl::value() const {
  assert(valid_);
  return rs_.iter_->value();
}

Status ReplayIteratorImpl::status() const {
  if (!status_.ok()) {
    return status_;
  } else {
    return rs_.iter_->status();
  }
}

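// Adds a newer memtable to the tail of the replay queue, taking a reference
// that is released when Prime() or cleanup() retires the memtable. mems_ is
// otherwise only touched under mutex_, so this is presumably called with the
// DB mutex held.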
void ReplayIteratorImpl::enqueue(MemTable* m, SequenceNumber s) {
  m->Ref();
  mems_.push_back(ReplayState(m, s));
}

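// Tears the iterator down and deletes it. The caller must hold mutex_; the
// lock is dropped around every delete/Unref so that iterator and memtable
// destruction happens outside the DB mutex, and re-acquired before touching
// mems_ again.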
void ReplayIteratorImpl::cleanup() {
  mutex_->Unlock();
  if (rs_.iter_) {
    delete rs_.iter_;
  }
  if (rs_.mem_) {
    rs_.mem_->Unref();
  }
  mutex_->Lock();
  rs_.iter_ = NULL;
  rs_.mem_ = NULL;
  while (!mems_.empty()) {
    MemTable* mem = mems_.front().mem_;
    Iterator* iter = mems_.front().iter_;
    mutex_->Unlock();
    if (iter) {
      delete iter;
    }
    if (mem) {
      mem->Unref();
    }
    mutex_->Lock();
    mems_.pop_front();
  }
  delete this;
}

bool ReplayIteratorImpl::ParseKey(ParsedInternalKey* ikey) {
  return ParseKey(rs_.iter_->key(), ikey);
}

bool ReplayIteratorImpl::ParseKey(const Slice& k, ParsedInternalKey* ikey) {
  if (!ParseInternalKey(k, ikey)) {
    status_ = Status::Corruption("corrupted internal key in ReplayIteratorImpl");
    return false;
  } else {
    return true;
  }
}

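// Advances rs_.iter_ to the next entry worth surfacing: a put or deletion at
// sequence >= rs_.seq_start_ that is not an older version of the key just
// returned. When the current iterator is exhausted, switches to the next
// queued memtable (or re-scans the current one for newer writes) and marks
// the iterator invalid once seq_start_ catches up with the DB's last
// sequence number.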
void ReplayIteratorImpl::Prime() {
  valid_ = false;
  if (!status_.ok()) {
    return;
  }
  while (true) {
    assert(rs_.iter_);
    while (rs_.iter_->Valid()) {
      ParsedInternalKey ikey;
      if (!ParseKey(rs_.iter_->key(), &ikey)) {
        return;
      }
      // if we can consider this key, and it's recent enough and of the right
      // type
      if ((!has_current_user_key_ ||
           user_comparator_->Compare(ikey.user_key,
                                     Slice(current_user_key_)) != 0 ||
           ikey.sequence >= current_user_sequence_) &&
          (ikey.sequence >= rs_.seq_start_ &&
           (ikey.type == kTypeDeletion || ikey.type == kTypeValue))) {
        has_current_user_key_ = true;
        current_user_key_.assign(ikey.user_key.data(), ikey.user_key.size());
        current_user_sequence_ = ikey.sequence;
        valid_ = true;
        return;
      }
      rs_.iter_->Next();
    }
    if (!rs_.iter_->status().ok()) {
      status_ = rs_.iter_->status();
      valid_ = false;
      return;
    }
    // we're done with rs_.iter_
    has_current_user_key_ = false;
    current_user_key_.assign("", 0);
    current_user_sequence_ = kMaxSequenceNumber;
    delete rs_.iter_;
    rs_.iter_ = NULL;
    {
      MutexLock l(mutex_);
      if (mems_.empty() ||
          rs_.seq_limit_ < mems_.front().seq_start_) {
        rs_.seq_start_ = rs_.seq_limit_;
      } else {
        if (rs_.mem_) {
          rs_.mem_->Unref();
          rs_.mem_ = NULL;
        }
        rs_.mem_ = mems_.front().mem_;
        rs_.seq_start_ = mems_.front().seq_start_;
        mems_.pop_front();
      }
    }
    rs_.seq_limit_ = db_->LastSequence();
    rs_.iter_ = rs_.mem_->NewIterator();
    rs_.iter_->SeekToFirst();
    assert(rs_.seq_start_ <= rs_.seq_limit_);
    if (rs_.seq_start_ == rs_.seq_limit_) {
      valid_ = false;
      return;
    }
  }
}

}  // namespace hyperleveldb
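
For context, here is a minimal consumption sketch for the iterator implemented above. It uses only the interface this file defines (Valid(), HasValue(), key(), value(), Next(), status()); the include path and the way the iterator is created and released (upstream HyperLevelDB exposes DB::GetReplayIterator and DB::ReleaseReplayIterator for that) are assumptions and should be checked against the vendored headers.

#include <iostream>

#include "../hyperleveldb/db.h"  // assumed public header for this vendored copy

// Drain a replay iterator the caller obtained from the DB (upstream
// HyperLevelDB hands one out via DB::GetReplayIterator and takes it back via
// DB::ReleaseReplayIterator; those calls are assumptions here).
void DrainReplay(hyperleveldb::ReplayIterator* it) {
  // Valid() runs Prime(): superseded versions of a key are skipped, newer
  // memtables are swapped in as the DB enqueues them, and false is returned
  // once the iterator has caught up with the DB's last sequence number.
  while (it->Valid()) {
    if (it->HasValue()) {
      // kTypeValue entry: a put of key() -> value().
      std::cout << it->key().ToString() << " = "
                << it->value().ToString() << "\n";
    } else {
      // kTypeDeletion entry: the key was removed.
      std::cout << it->key().ToString() << " <deleted>\n";
    }
    it->Next();
  }
  // Surface any corruption detected while parsing internal keys.
  if (!it->status().ok()) {
    std::cerr << "replay stopped on error: " << it->status().ToString() << "\n";
  }
}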