Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 17:27:55 +00:00
[RocksDB] Use raw pointer instead of shared pointer when passing Statistics object internally
Summary: The liveness of the Statistics object is already ensured by the shared pointer held in the DB options, so there is no reason to pass a shared pointer around again among internal functions; a raw pointer is sufficient and more efficient.

Test Plan: make check

Reviewers: dhruba, MarkCallaghan, igor

Reviewed By: dhruba

CC: leveldb, reconnect.grayhat

Differential Revision: https://reviews.facebook.net/D14289
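The reasoning in the summary is an ownership argument: the shared_ptr held in the DB's Options keeps the Statistics object alive for as long as the DB exists, so purely internal code can take a plain Statistics* and avoid copying the shared_ptr (and bumping its atomic reference count) on hot paths. The minimal, self-contained sketch below illustrates that pattern; it is not code from this repository, and the types and helpers in it (Statistics, Options, DBImpl, RecordTick, DB_GET) are simplified stand-ins.

// Illustration only: stand-in types showing why a raw Statistics* is safe
// when a shared_ptr owner (Options::statistics) outlives every internal call.
#include <cstdint>
#include <iostream>
#include <memory>
#include <utility>

struct Statistics {
  void recordTick(uint32_t /*ticker*/, uint64_t count) { ticks += count; }
  uint64_t ticks = 0;
};

struct Options {
  // Owning handle; guarantees the Statistics object outlives the DB
  // that holds a copy of these options.
  std::shared_ptr<Statistics> statistics;
};

enum Tickers : uint32_t { DB_GET = 0 };

// Internal helper: accepts a raw pointer, tolerates nullptr, and never
// touches the shared_ptr control block on the hot path.
inline void RecordTick(Statistics* stats, uint32_t ticker, uint64_t count = 1) {
  if (stats != nullptr) {
    stats->recordTick(ticker, count);
  }
}

class DBImpl {
 public:
  explicit DBImpl(Options options) : options_(std::move(options)) {}
  void Get() {
    // Dereference the owning pointer once at the call site, mirroring the
    // options_.statistics.get() calls introduced by this commit.
    RecordTick(options_.statistics.get(), DB_GET);
  }
 private:
  Options options_;
};

int main() {
  Options options;
  options.statistics = std::make_shared<Statistics>();
  DBImpl db(options);  // the DB keeps its own copy of the shared_ptr
  db.Get();
  std::cout << "ticks recorded: " << options.statistics->ticks << "\n";
  return 0;
}

The one rule the pattern imposes is that no internal component may cache the raw pointer beyond the lifetime of the owning Options, which holds here because every object involved is owned by the DB itself.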
@@ -112,6 +112,7 @@ Status BuildTable(const std::string& dbname,
       if (this_ikey.type == kTypeMerge) {
         // Handle merge-type keys using the MergeHelper
+        // TODO: pass statistics to MergeUntil
         merge.MergeUntil(iter, 0 /* don't worry about snapshot */);
         iterator_at_next = true;
         if (merge.IsSuccess()) {
@@ -188,10 +189,10 @@ Status BuildTable(const std::string& dbname,
   // Finish and check for file errors
   if (s.ok() && !options.disableDataSync) {
     if (options.use_fsync) {
-      StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
+      StopWatch sw(env, options.statistics.get(), TABLE_SYNC_MICROS);
       s = file->Fsync();
     } else {
-      StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
+      StopWatch sw(env, options.statistics.get(), TABLE_SYNC_MICROS);
       s = file->Sync();
     }
   }
@@ -404,7 +404,7 @@ const Status DBImpl::CreateArchivalDirectory() {
 }
 
 void DBImpl::PrintStatistics() {
-  auto dbstats = options_.statistics;
+  auto dbstats = options_.statistics.get();
   if (dbstats) {
     Log(options_.info_log,
         "STATISTCS:\n %s",
@@ -860,7 +860,7 @@ Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
     if (versions_->LastSequence() < max_sequence) {
       versions_->SetLastSequence(max_sequence);
     }
-    SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
+    SetTickerCount(options_.statistics.get(), SEQUENCE_NUMBER,
                    versions_->LastSequence());
   }
 }
@@ -1297,7 +1297,7 @@ SequenceNumber DBImpl::GetLatestSequenceNumber() const {
 Status DBImpl::GetUpdatesSince(SequenceNumber seq,
                                unique_ptr<TransactionLogIterator>* iter) {
 
-  RecordTick(options_.statistics, GET_UPDATES_SINCE_CALLS);
+  RecordTick(options_.statistics.get(), GET_UPDATES_SINCE_CALLS);
   if (seq > versions_->LastSequence()) {
     return Status::IOError("Requested sequence not yet written in the db");
   }
@@ -1971,10 +1971,12 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
   // Finish and check for file errors
   if (s.ok() && !options_.disableDataSync) {
     if (options_.use_fsync) {
-      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
+      StopWatch sw(env_, options_.statistics.get(),
+                   COMPACTION_OUTFILE_SYNC_MICROS);
       s = compact->outfile->Fsync();
     } else {
-      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
+      StopWatch sw(env_, options_.statistics.get(),
+                   COMPACTION_OUTFILE_SYNC_MICROS);
       s = compact->outfile->Sync();
     }
   }
@@ -2212,7 +2214,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
           ParseInternalKey(key, &ikey);
           // no value associated with delete
           value.clear();
-          RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
+          RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_USER);
         } else if (value_changed) {
           value = compaction_filter_value;
         }
@@ -2238,7 +2240,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
         // TODO: why not > ?
         assert(last_sequence_for_key >= ikey.sequence);
         drop = true;    // (A)
-        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
+        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_NEWER_ENTRY);
       } else if (ikey.type == kTypeDeletion &&
                  ikey.sequence <= earliest_snapshot &&
                  compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -2250,7 +2252,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
        // few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
-       RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
+       RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_OBSOLETE);
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
@@ -2259,7 +2261,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
        merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
-                        options_.statistics);
+                        options_.statistics.get());
        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
@@ -2412,8 +2414,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
 
   CompactionStats stats;
   stats.micros = env_->NowMicros() - start_micros - imm_micros;
-  if (options_.statistics) {
-    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
+  if (options_.statistics.get()) {
+    options_.statistics.get()->measureTime(COMPACTION_TIME, stats.micros);
   }
   stats.files_in_leveln = compact->compaction->num_input_files(0);
   stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
@@ -2554,7 +2556,7 @@ Status DBImpl::GetImpl(const ReadOptions& options,
                        bool* value_found) {
   Status s;
 
-  StopWatch sw(env_, options_.statistics, DB_GET);
+  StopWatch sw(env_, options_.statistics.get(), DB_GET);
   SequenceNumber snapshot;
   mutex_.Lock();
   if (options.snapshot != nullptr) {
@@ -2605,8 +2607,8 @@ Status DBImpl::GetImpl(const ReadOptions& options,
 
   LogFlush(options_.info_log);
   // Note, tickers are atomic now - no lock protection needed any more.
-  RecordTick(options_.statistics, NUMBER_KEYS_READ);
-  RecordTick(options_.statistics, BYTES_READ, value->size());
+  RecordTick(options_.statistics.get(), NUMBER_KEYS_READ);
+  RecordTick(options_.statistics.get(), BYTES_READ, value->size());
   return s;
 }
 
@@ -2614,7 +2616,7 @@ std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                      const std::vector<Slice>& keys,
                                      std::vector<std::string>* values) {
 
-  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
+  StopWatch sw(env_, options_.statistics.get(), DB_MULTIGET);
   SequenceNumber snapshot;
   mutex_.Lock();
   if (options.snapshot != nullptr) {
@@ -2683,9 +2685,9 @@ std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
   mutex_.Unlock();
 
   LogFlush(options_.info_log);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_CALLS);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_KEYS_READ, numKeys);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_BYTES_READ, bytesRead);
 
   return statList;
 }
@@ -2760,7 +2762,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
   w.disableWAL = options.disableWAL;
   w.done = false;
 
-  StopWatch sw(env_, options_.statistics, DB_WRITE);
+  StopWatch sw(env_, options_.statistics.get(), DB_WRITE);
   MutexLock l(&mutex_);
   writers_.push_back(&w);
   while (!w.done && &w != writers_.front()) {
@@ -2793,8 +2795,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
       int my_batch_count = WriteBatchInternal::Count(updates);
       last_sequence += my_batch_count;
       // Record statistics
-      RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
-      RecordTick(options_.statistics,
+      RecordTick(options_.statistics.get(),
+                 NUMBER_KEYS_WRITTEN, my_batch_count);
+      RecordTick(options_.statistics.get(),
                  BYTES_WRITTEN,
                  WriteBatchInternal::ByteSize(updates));
       if (options.disableWAL) {
@@ -2808,10 +2811,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
         BumpPerfTime(&perf_context.wal_write_time, &timer);
         if (status.ok() && options.sync) {
           if (options_.use_fsync) {
-            StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
+            StopWatch(env_, options_.statistics.get(), WAL_FILE_SYNC_MICROS);
            status = log_->file()->Fsync();
          } else {
-            StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
+            StopWatch(env_, options_.statistics.get(), WAL_FILE_SYNC_MICROS);
            status = log_->file()->Sync();
          }
        }
@@ -2826,7 +2829,8 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
         // have succeeded in memtable but Status reports error for all writes.
         throw std::runtime_error("In memory WriteBatch corruption!");
       }
-      SetTickerCount(options_.statistics, SEQUENCE_NUMBER, last_sequence);
+      SetTickerCount(options_.statistics.get(),
+                     SEQUENCE_NUMBER, last_sequence);
     }
     LogFlush(options_.info_log);
     mutex_.Lock();
@@ -2975,7 +2979,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       mutex_.Unlock();
       uint64_t delayed;
       {
-        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
+        StopWatch sw(env_, options_.statistics.get(), STALL_L0_SLOWDOWN_COUNT);
         env_->SleepForMicroseconds(
           SlowdownAmount(versions_->NumLevelFiles(0),
                          options_.level0_slowdown_writes_trigger,
@@ -2983,7 +2987,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
         );
         delayed = sw.ElapsedMicros();
       }
-      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
+      RecordTick(options_.statistics.get(), STALL_L0_SLOWDOWN_MICROS, delayed);
       stall_level0_slowdown_ += delayed;
       stall_level0_slowdown_count_++;
       allow_delay = false;  // Do not delay a single write more than once
@@ -3003,12 +3007,13 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       Log(options_.info_log, "wait for memtable compaction...\n");
       uint64_t stall;
       {
-        StopWatch sw(env_, options_.statistics,
+        StopWatch sw(env_, options_.statistics.get(),
                      STALL_MEMTABLE_COMPACTION_COUNT);
         bg_cv_.Wait();
         stall = sw.ElapsedMicros();
       }
-      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
+      RecordTick(options_.statistics.get(),
+                 STALL_MEMTABLE_COMPACTION_MICROS, stall);
       stall_memtable_compaction_ += stall;
       stall_memtable_compaction_count_++;
     } else if (versions_->NumLevelFiles(0) >=
@@ -3018,11 +3023,12 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       Log(options_.info_log, "wait for fewer level0 files...\n");
       uint64_t stall;
       {
-        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     STALL_L0_NUM_FILES_COUNT);
         bg_cv_.Wait();
         stall = sw.ElapsedMicros();
       }
-      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
+      RecordTick(options_.statistics.get(), STALL_L0_NUM_FILES_MICROS, stall);
       stall_level0_num_files_ += stall;
       stall_level0_num_files_count_++;
     } else if (
@@ -3034,7 +3040,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       mutex_.Unlock();
      uint64_t delayed;
      {
-        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     HARD_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(1000);
        delayed = sw.ElapsedMicros();
      }
@@ -3043,7 +3050,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       // Make sure the following value doesn't round to zero.
       uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
       rate_limit_delay_millis += rate_limit;
-      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
+      RecordTick(options_.statistics.get(),
+                 RATE_LIMIT_DELAY_MILLIS, rate_limit);
       if (options_.rate_limit_delay_max_milliseconds > 0 &&
           rate_limit_delay_millis >=
           (unsigned)options_.rate_limit_delay_max_milliseconds) {
@@ -3058,7 +3066,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       // TODO: add statistics
       mutex_.Unlock();
       {
-        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     SOFT_RATE_LIMIT_DELAY_COUNT);
         env_->SleepForMicroseconds(SlowdownAmount(
           score,
           options_.soft_rate_limit,
@@ -69,7 +69,7 @@ class DBIter: public Iterator {
         direction_(kForward),
         valid_(false),
         current_entry_is_merged_(false),
-        statistics_(options.statistics) {
+        statistics_(options.statistics.get()) {
     RecordTick(statistics_, NO_ITERATORS, 1);
     max_skip_ = options.max_sequential_skip_in_iterations;
   }
@@ -135,7 +135,7 @@ class DBIter: public Iterator {
   Direction direction_;
   bool valid_;
   bool current_entry_is_merged_;
-  std::shared_ptr<Statistics> statistics_;
+  Statistics* statistics_;
   uint64_t max_skip_;
 
   // No copying allowed
@@ -19,6 +19,7 @@
 #include "util/coding.h"
 #include "util/mutexlock.h"
 #include "util/murmurhash.h"
+#include "util/statistics_imp.h"
 
 namespace std {
 template <>
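This hunk (and the later ones in the merge-helper and write-batch sources) adds #include "util/statistics_imp.h", a header whose contents are not part of this diff. Judging only from the call sites shown here, where RecordTick and SetTickerCount now receive options.statistics.get(), it presumably declares null-tolerant helpers that take a raw Statistics*. The sketch below is an assumption about their shape, not the actual header, and it uses a simplified stand-in Statistics interface in its own namespace.

// Sketch only: a plausible shape for the raw-pointer helpers implied by the
// call sites in this diff. The real util/statistics_imp.h is not shown here;
// the Statistics interface below is a simplified stand-in.
#pragma once
#include <cstdint>

namespace rocksdb_sketch {

class Statistics {
 public:
  virtual ~Statistics() {}
  virtual void recordTick(uint32_t ticker_type, uint64_t count) = 0;
  virtual void setTickerCount(uint32_t ticker_type, uint64_t count) = 0;
};

// Null-tolerant wrappers taking a raw pointer: callers pass
// options.statistics.get(), or nullptr when statistics are disabled.
inline void RecordTick(Statistics* statistics, uint32_t ticker_type,
                       uint64_t count = 1) {
  if (statistics != nullptr) {
    statistics->recordTick(ticker_type, count);
  }
}

inline void SetTickerCount(Statistics* statistics, uint32_t ticker_type,
                           uint64_t count) {
  if (statistics != nullptr) {
    statistics->setTickerCount(ticker_type, count);
  }
}

}  // namespace rocksdb_sketch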
@@ -203,7 +204,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
         assert(merge_operator);
         if (!merge_operator->FullMerge(key.user_key(), &v, *operands,
                                        value, logger.get())) {
-          RecordTick(options.statistics, NUMBER_MERGE_FAILURES);
+          RecordTick(options.statistics.get(), NUMBER_MERGE_FAILURES);
           *s = Status::Corruption("Error: Could not perform merge.");
         }
       } else {
@@ -220,7 +221,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
         *s = Status::OK();
         if (!merge_operator->FullMerge(key.user_key(), nullptr, *operands,
                                        value, logger.get())) {
-          RecordTick(options.statistics, NUMBER_MERGE_FAILURES);
+          RecordTick(options.statistics.get(), NUMBER_MERGE_FAILURES);
           *s = Status::Corruption("Error: Could not perform merge.");
         }
       } else {
@@ -8,6 +8,7 @@
 #include "rocksdb/comparator.h"
 #include "rocksdb/db.h"
 #include "rocksdb/merge_operator.h"
+#include "util/statistics_imp.h"
 #include <string>
 #include <stdio.h>
@@ -20,7 +21,7 @@ namespace rocksdb {
 // operands_ stores the list of merge operands encountered while merging.
 // keys_[i] corresponds to operands_[i] for each i.
 void MergeHelper::MergeUntil(Iterator* iter, SequenceNumber stop_before,
-                             bool at_bottom, shared_ptr<Statistics> stats) {
+                             bool at_bottom, Statistics* stats) {
   // Get a copy of the internal key, before it's invalidated by iter->Next()
   // Also maintain the list of merge operands seen.
   keys_.clear();
@@ -8,7 +8,6 @@
 
 #include "db/dbformat.h"
 #include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
 #include <string>
 #include <deque>
@@ -18,6 +17,7 @@ class Comparator;
 class Iterator;
 class Logger;
 class MergeOperator;
+class Statistics;
 
 class MergeHelper {
  public:
@@ -46,7 +46,7 @@ class MergeHelper {
   // at_bottom:   (IN) true if the iterator covers the bottem level, which means
   //              we could reach the start of the history of this user key.
   void MergeUntil(Iterator* iter, SequenceNumber stop_before = 0,
-                  bool at_bottom = false, shared_ptr<Statistics> stats=nullptr);
+                  bool at_bottom = false, Statistics* stats = nullptr);
 
   // Query the merge result
   // These are valid until the next MergeUntil call
@@ -65,12 +65,12 @@ Status TableCache::FindTable(const EnvOptions& toptions,
     unique_ptr<RandomAccessFile> file;
     unique_ptr<TableReader> table_reader;
     s = env_->NewRandomAccessFile(fname, &file, toptions);
-    RecordTick(options_->statistics, NO_FILE_OPENS);
+    RecordTick(options_->statistics.get(), NO_FILE_OPENS);
     if (s.ok()) {
       if (options_->advise_random_on_open) {
         file->Hint(RandomAccessFile::RANDOM);
       }
-      StopWatch sw(env_, options_->statistics, TABLE_OPEN_IO_MICROS);
+      StopWatch sw(env_, options_->statistics.get(), TABLE_OPEN_IO_MICROS);
       s = options_->table_factory->GetTableReader(*options_, toptions,
                                                   std::move(file), file_size,
                                                   &table_reader);
@@ -78,7 +78,7 @@ Status TableCache::FindTable(const EnvOptions& toptions,
 
     if (!s.ok()) {
       assert(table_reader == nullptr);
-      RecordTick(options_->statistics, NO_FILE_ERRORS);
+      RecordTick(options_->statistics.get(), NO_FILE_ERRORS);
       // We do not cache error results so that if the error is transient,
       // or somebody repairs the file, we recover automatically.
     } else {
@@ -290,7 +290,7 @@ struct Saver {
   std::deque<std::string>* merge_operands;  // the merge operations encountered
   Logger* logger;
   bool didIO;    // did we do any disk io?
-  shared_ptr<Statistics> statistics;
+  Statistics* statistics;
 };
 }
@@ -439,7 +439,7 @@ void Version::Get(const ReadOptions& options,
   saver.merge_operands = operands;
   saver.logger = logger.get();
   saver.didIO = false;
-  saver.statistics = db_options.statistics;
+  saver.statistics = db_options.statistics.get();
 
   stats->seek_file = nullptr;
   stats->seek_file_level = -1;
@@ -566,7 +566,7 @@ void Version::Get(const ReadOptions& options,
                                    value, logger.get())) {
       *status = Status::OK();
     } else {
-      RecordTick(db_options.statistics, NUMBER_MERGE_FAILURES);
+      RecordTick(db_options.statistics.get(), NUMBER_MERGE_FAILURES);
       *status = Status::Corruption("could not perform end-of-key merge for ",
                                    user_key);
     }
@@ -1296,10 +1296,12 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     }
     if (s.ok()) {
       if (options_->use_fsync) {
-        StopWatch sw(env_, options_->statistics, MANIFEST_FILE_SYNC_MICROS);
+        StopWatch sw(env_, options_->statistics.get(),
+                     MANIFEST_FILE_SYNC_MICROS);
         s = descriptor_log_->file()->Fsync();
       } else {
-        StopWatch sw(env_, options_->statistics, MANIFEST_FILE_SYNC_MICROS);
+        StopWatch sw(env_, options_->statistics.get(),
+                     MANIFEST_FILE_SYNC_MICROS);
         s = descriptor_log_->file()->Sync();
       }
     }
@@ -20,15 +20,14 @@
 // data: uint8[len]
 
 #include "rocksdb/write_batch.h"
 
-#include "rocksdb/options.h"
-#include "rocksdb/statistics.h"
 #include "db/dbformat.h"
 #include "db/db_impl.h"
 #include "db/memtable.h"
 #include "db/snapshot.h"
 #include "db/write_batch_internal.h"
 #include "util/coding.h"
+#include "util/statistics_imp.h"
 #include <stdexcept>
 
 namespace rocksdb {
@@ -197,7 +196,7 @@ class MemTableInserter : public WriteBatch::Handler {
   virtual void Put(const Slice& key, const Slice& value) {
     if (options_->inplace_update_support
         && mem_->Update(sequence_, kTypeValue, key, value)) {
-      RecordTick(options_->statistics, NUMBER_KEYS_UPDATED);
+      RecordTick(options_->statistics.get(), NUMBER_KEYS_UPDATED);
     } else {
       mem_->Add(sequence_, kTypeValue, key, value);
     }
@@ -215,7 +214,7 @@ class MemTableInserter : public WriteBatch::Handler {
       ropts.snapshot = &read_from_snapshot;
       std::string value;
       if (!db_->KeyMayExist(ropts, key, &value)) {
-        RecordTick(options_->statistics, NUMBER_FILTERED_DELETES);
+        RecordTick(options_->statistics.get(), NUMBER_FILTERED_DELETES);
        return;
      }
    }