RocksDB changes to support unity build:

* Remove extra definition of TotalFileSize
* Remove extra definition of ClipToRange
* Move EncodedFileMetaData out from anonymous namespace
* Move version_set Saver to its own namespace
* Move some symbols into a named namespace
* Move symbols out of anonymous namespace (prevents warning)
* Make BloomHash inline
Vinnie Falco
2014-06-04 17:11:33 -07:00
parent 888a3fec21
commit c7f1f6a91f
12 changed files with 33 additions and 59 deletions
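
The removals listed in the commit message all converge on one pattern. In a unity (amalgamated) build every source file is compiled as one translation unit, so a helper defined identically in several files' anonymous namespaces becomes a redefinition, and identically named local types collide under the one-definition rule. The fix used here is either a single inline definition in a shared header (ClipToRange, BloomHash) or a uniquely named namespace per source file (version_set, merger). Below is a minimal standalone sketch of the shared-header variant; the file name and option value are hypothetical, not part of the commit.

// clip_to_range_demo.cpp -- illustrative sketch only.
// One inline definition can be included from every source and still satisfy
// the one-definition rule when those sources are merged into a single unity
// translation unit.
#include <cstdio>

template <class T, class V>
inline void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}

int main() {
  int max_write_buffer_number = 100;             // hypothetical option value
  ClipToRange(&max_write_buffer_number, 2, 64);  // clamps to [2, 64]
  std::printf("%d\n", max_write_buffer_number);  // prints 64
  return 0;
}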

View File

@@ -49,15 +49,6 @@ ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
 uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }
-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-} // anonymous namespace
 ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
                                     const InternalFilterPolicy* ipolicy,
                                     const ColumnFamilyOptions& src) {

View File

@@ -18,7 +18,7 @@
 namespace rocksdb {
-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
+uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
   uint64_t sum = 0;
   for (size_t i = 0; i < files.size() && files[i]; i++) {
     sum += files[i]->file_size;

View File

@@ -10,8 +10,12 @@
 #pragma once
 #include "db/version_set.h"
+#include <vector>
 namespace rocksdb {
+struct FileMetaData;
 class Version;
 class ColumnFamilyData;
@@ -155,4 +159,7 @@ class Compaction {
   void ResetNextCompactionIndex();
 };
+// Utility function
+extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);
 } // namespace rocksdb

View File

@@ -19,14 +19,6 @@ namespace rocksdb {
 namespace {
-uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->file_size;
-  }
-  return sum;
-}
 // Multiple two operands. If they overflow, return op1.
 uint64_t MultiplyCheckOverflow(uint64_t op1, int op2) {
   if (op1 == 0) {

View File

@@ -252,15 +252,6 @@ struct DBImpl::CompactionState {
   }
 };
-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-} // anonymous namespace
 Options SanitizeOptions(const std::string& dbname,
                         const InternalKeyComparator* icmp,
                         const InternalFilterPolicy* ipolicy,

View File

@@ -632,4 +632,11 @@ CompressionType GetCompressionType(const Options& options, int level,
 // Determine compression type for L0 file written by memtable flush.
 CompressionType GetCompressionFlush(const Options& options);
+// Fix user-supplied options to be reasonable
+template <class T, class V>
+inline void ClipToRange(T* ptr, V minvalue, V maxvalue) {
+  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
+  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
+}
 } // namespace rocksdb

View File

@@ -18,6 +18,7 @@
 #include <unordered_map>
 #include <stdio.h>
+#include "db/compaction.h"
 #include "db/filename.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -39,14 +40,6 @@
 namespace rocksdb {
-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->file_size;
-  }
-  return sum;
-}
 Version::~Version() {
   assert(refs_ == 0);
@@ -150,7 +143,6 @@ bool SomeFileOverlapsRange(
   return !BeforeFile(ucmp, largest_user_key, files[index]);
 }
-namespace {
 // Used for LevelFileNumIterator to pass "block handle" value,
 // which actually means file information in this iterator.
 // It contains subset of fields of FileMetaData, that is sufficient
@@ -160,7 +152,6 @@ struct EncodedFileMetaData {
   uint64_t file_size; // file size
   TableReader* table_reader; // cached table reader
 };
-} // namespace
 // An internal iterator. For a given version/level pair, yields
 // information about the files in the level. For a given entry, key()
@@ -359,7 +350,6 @@ void Version::AddIterators(const ReadOptions& read_options,
 }
 // Callback from TableCache::Get()
-namespace {
 enum SaverState {
   kNotFound,
   kFound,
@@ -367,6 +357,7 @@ enum SaverState {
   kCorrupt,
   kMerge // saver contains the current merge result (the operands)
 };
+namespace version_set {
 struct Saver {
   SaverState state;
   const Comparator* ucmp;
@@ -380,7 +371,7 @@ struct Saver {
   bool didIO; // did we do any disk io?
   Statistics* statistics;
 };
-}
+} // namespace version_set
 // Called from TableCache::Get and Table::Get when file/block in which
 // key may exist are not there in TableCache/BlockCache respectively. In this
@@ -388,7 +379,7 @@ struct Saver {
 // IO to be certain.Set the status=kFound and value_found=false to let the
 // caller know that key may exist but is not there in memory
 static void MarkKeyMayExist(void* arg) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
+  version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
   s->state = kFound;
   if (s->value_found != nullptr) {
     *(s->value_found) = false;
@@ -397,7 +388,7 @@ static void MarkKeyMayExist(void* arg) {
 static bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
                       const Slice& v, bool didIO) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
+  version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
   MergeContext* merge_contex = s->merge_context;
   std::string merge_result; // temporary area for merge results later
@@ -527,7 +518,7 @@ void Version::Get(const ReadOptions& options,
   Slice user_key = k.user_key();
   assert(status->ok() || status->IsMergeInProgress());
-  Saver saver;
+  version_set::Saver saver;
   saver.state = status->ok()? kNotFound : kMerge;
   saver.ucmp = user_comparator_;
   saver.user_key = user_key;

View File

@@ -45,7 +45,6 @@ namespace rocksdb {
 extern const std::string kHashIndexPrefixesBlock;
 extern const std::string kHashIndexPrefixesMetadataBlock;
-namespace {
 typedef BlockBasedTableOptions::IndexType IndexType;
@@ -335,8 +334,6 @@ Slice CompressBlock(const Slice& raw,
   return raw;
 }
-} // anonymous namespace
 // kBlockBasedTableMagicNumber was picked by running
 // echo rocksdb.table.block_based | sha1sum
 // and taking the leading 64 bits.

View File

@@ -23,7 +23,7 @@
 #include "util/autovector.h"
 namespace rocksdb {
-namespace {
+namespace merger {
 typedef std::priority_queue<
     IteratorWrapper*,
     std::vector<IteratorWrapper*>,
@@ -43,7 +43,7 @@ MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
 MinIterHeap NewMinIterHeap(const Comparator* comparator) {
   return MinIterHeap(MinIteratorComparator(comparator));
 }
-} // namespace
+} // namespace merger
 const size_t kNumIterReserve = 4;
@@ -56,8 +56,8 @@ class MergingIterator : public Iterator {
         current_(nullptr),
         use_heap_(true),
         direction_(kForward),
-        maxHeap_(NewMaxIterHeap(comparator_)),
-        minHeap_(NewMinIterHeap(comparator_)) {
+        maxHeap_(merger::NewMaxIterHeap(comparator_)),
+        minHeap_(merger::NewMinIterHeap(comparator_)) {
     children_.resize(n);
     for (int i = 0; i < n; i++) {
       children_[i].Set(children[i]);
@@ -274,8 +274,8 @@ class MergingIterator : public Iterator {
     kReverse
   };
   Direction direction_;
-  MaxIterHeap maxHeap_;
-  MinIterHeap minHeap_;
+  merger::MaxIterHeap maxHeap_;
+  merger::MinIterHeap minHeap_;
 };
 void MergingIterator::FindSmallest() {
@@ -302,8 +302,8 @@ void MergingIterator::FindLargest() {
 void MergingIterator::ClearHeaps() {
   use_heap_ = true;
-  maxHeap_ = NewMaxIterHeap(comparator_);
-  minHeap_ = NewMinIterHeap(comparator_);
+  maxHeap_ = merger::NewMaxIterHeap(comparator_);
+  minHeap_ = merger::NewMinIterHeap(comparator_);
 }
 Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,
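
The hunks above, together with the version_set Saver change, use the second technique from the commit message: helpers that stay inside a .cc file move from an anonymous namespace into a file-specific named namespace, so two sources can keep a same-named local type without colliding once they share a unity translation unit. A minimal sketch of why that works, with simplified struct bodies and hypothetical file boundaries:

// named_namespace_demo.cpp -- illustrative only; not copied from RocksDB.
// In a unity build both "files" below land in one translation unit, where two
// anonymous-namespace definitions of the same name would be a redefinition.
// Distinct named namespaces keep the local names apart.
namespace version_set {          // stands in for the version_set source file
struct Saver { int state; };
}  // namespace version_set

namespace merger {               // stands in for the merger source file
struct Saver { int state; };     // same identifier, different namespace: fine
}  // namespace merger

int main() {
  version_set::Saver a{0};
  merger::Saver b{1};
  return a.state + b.state;
}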

View File

@@ -15,9 +15,6 @@
 namespace rocksdb {
 namespace {
-static uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
 class BloomFilterPolicy : public FilterPolicy {
  private:

View File

@@ -14,9 +14,6 @@
 namespace rocksdb {
 namespace {
-static uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
 uint32_t GetNumBlocks(uint32_t total_bits) {
   uint32_t num_blocks = (total_bits + CACHE_LINE_SIZE * 8 - 1) /

View File

@@ -17,4 +17,8 @@ namespace rocksdb {
 extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
+inline uint32_t BloomHash(const Slice& key) {
+  return Hash(key.data(), key.size(), 0xbc9f1d34);
+}
 }
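
A usage sketch for the BloomHash that is now inlined in the hash header above, assuming the RocksDB source tree's include directories are on the include path; the key value is made up:

// bloom_hash_usage.cpp -- illustrative only.
#include <cstdio>
#include "rocksdb/slice.h"  // rocksdb::Slice
#include "util/hash.h"      // rocksdb::Hash and the inline rocksdb::BloomHash

int main() {
  rocksdb::Slice key("example_key");  // hypothetical key
  // Both bloom filter implementations now share this single definition.
  std::printf("%u\n", static_cast<unsigned>(rocksdb::BloomHash(key)));
  return 0;
}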