Mirror of https://github.com/XRPLF/rippled.git, synced 2026-01-12 02:35:22 +00:00
RocksDB changes to support unity build:

* Remove extra definition of TotalFileSize
* Remove extra definition of ClipToRange
* Move EncodedFileMetaData out from anonymous namespace
* Move version_set Saver to its own namespace
* Move some symbols into a named namespace
* Move symbols out of anonymous namespace (prevents warning)
* Make BloomHash inline
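For context (not stated in the commit message itself): a unity build compiles many .cc files as one translation unit, so helpers that each source file defines for itself -- static functions or anything in an anonymous namespace -- collide once the sources are merged. A minimal sketch of the failure mode, with hypothetical file and function names:

// unity_sketch.cc -- hypothetical example, not rippled/RocksDB source.
// A unity build effectively does:  #include "file_a.cc"  /  #include "file_b.cc"
// so both files' unnamed namespaces merge into one unnamed namespace.

// --- contents of "file_a.cc" ---
namespace {
int Clamp(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }
}  // unnamed namespace
int UseA(int v) { return Clamp(v, 0, 10); }

// --- contents of "file_b.cc" ---
namespace {
// If this helper were also named Clamp (the way ClipToRange and TotalFileSize
// appear in more than one .cc in the diff below), the unity translation unit
// would fail with a redefinition error.
int ClampB(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }
}  // unnamed namespace
int UseB(int v) { return ClampB(v, 5, 50); }

// The commit's remedies: keep exactly one inline definition in a header,
// or move the symbol into a named namespace unique to its file.
int main() { return (UseA(42) == 10 && UseB(1) == 5) ? 0 : 1; }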
@@ -49,15 +49,6 @@ ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
 uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }
 
-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-} // anonymous namespace
-
 ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
                                     const InternalFilterPolicy* ipolicy,
                                     const ColumnFamilyOptions& src) {
@@ -18,7 +18,7 @@
 
 namespace rocksdb {
 
-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
+uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
   uint64_t sum = 0;
   for (size_t i = 0; i < files.size() && files[i]; i++) {
     sum += files[i]->fd.GetFileSize();
@@ -26,6 +26,14 @@ static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
   return sum;
 }
 
+uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
+  uint64_t sum = 0;
+  for (size_t i = 0; i < files.size() && files[i]; i++) {
+    sum += files[i]->compensated_file_size;
+  }
+  return sum;
+}
+
 Compaction::Compaction(Version* input_version, int level, int out_level,
                        uint64_t target_file_size,
                        uint64_t max_grandparent_overlap_bytes,
@@ -10,8 +10,12 @@
 #pragma once
 #include "db/version_set.h"
 
+#include <vector>
+
 namespace rocksdb {
 
+struct FileMetaData;
+
 class Version;
 class ColumnFamilyData;
 
@@ -164,4 +168,8 @@ class Compaction {
   void ResetNextCompactionIndex();
 };
 
+// Utility functions
+extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);
+extern uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files);
+
 } // namespace rocksdb
@@ -12,6 +12,7 @@
 #define __STDC_FORMAT_MACROS
 #include <inttypes.h>
 #include <limits>
+#include "db/compaction.h"
 #include "db/filename.h"
 #include "util/log_buffer.h"
 #include "util/statistics.h"
@@ -45,14 +46,6 @@ CompressionType GetCompressionType(const Options& options, int level,
   }
 }
 
-uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->compensated_file_size;
-  }
-  return sum;
-}
-
 // Multiple two operands. If they overflow, return op1.
 uint64_t MultiplyCheckOverflow(uint64_t op1, int op2) {
   if (op1 == 0) {
@@ -256,15 +256,6 @@ struct DBImpl::CompactionState {
   }
 };
 
-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-} // anonymous namespace
-
 Options SanitizeOptions(const std::string& dbname,
                         const InternalKeyComparator* icmp,
                         const InternalFilterPolicy* ipolicy,
@@ -644,4 +644,11 @@ extern Options SanitizeOptions(const std::string& db,
                               const Options& src);
 extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
 
+// Fix user-supplied options to be reasonable
+template <class T, class V>
+inline void ClipToRange(T* ptr, V minvalue, V maxvalue) {
+  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
+  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
+}
+
 } // namespace rocksdb
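The hunk above moves ClipToRange into a header as an inline template, which every includer (and a unity TU that concatenates them) can safely see, because inline permits repeated identical definitions. A small standalone usage sketch, not from the commit and with hypothetical option values, showing the clamping behavior:

// clip_to_range_demo.cc -- standalone sketch mirroring the ClipToRange
// template that the commit makes header-inline; values are hypothetical.
#include <cassert>
#include <cstddef>

// Same shape as the inline version added in the diff above: clamp *ptr into
// [minvalue, maxvalue], comparing in the type of the bounds.
template <class T, class V>
inline void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}

int main() {
  // Typical use: sanitize a user-supplied option into a supported range.
  size_t write_buffer_size = 1024;       // hypothetical user setting
  ClipToRange(&write_buffer_size, static_cast<size_t>(64 << 10),
              static_cast<size_t>(64 << 20));
  assert(write_buffer_size == 64 << 10); // clamped up to the minimum

  int max_open_files = 1000000;          // hypothetical user setting
  ClipToRange(&max_open_files, 20, 50000);
  assert(max_open_files == 50000);       // clamped down to the maximum
  return 0;
}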
@@ -19,6 +19,7 @@
 #include <vector>
 #include <stdio.h>
 
+#include "db/compaction.h"
 #include "db/filename.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -40,23 +41,6 @@
 
 namespace rocksdb {
 
-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->fd.GetFileSize();
-  }
-  return sum;
-}
-
-static uint64_t TotalCompensatedFileSize(
-    const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->compensated_file_size;
-  }
-  return sum;
-}
-
 Version::~Version() {
   assert(refs_ == 0);
@@ -373,7 +357,6 @@ void Version::AddIterators(const ReadOptions& read_options,
 }
 
 // Callback from TableCache::Get()
-namespace {
 enum SaverState {
   kNotFound,
   kFound,
@@ -381,6 +364,7 @@ enum SaverState {
   kCorrupt,
   kMerge // saver contains the current merge result (the operands)
 };
+namespace version_set {
 struct Saver {
   SaverState state;
   const Comparator* ucmp;
@@ -393,7 +377,7 @@ struct Saver {
   Logger* logger;
   Statistics* statistics;
 };
-}
+} // namespace version_set
 
 // Called from TableCache::Get and Table::Get when file/block in which
 // key may exist are not there in TableCache/BlockCache respectively. In this
@@ -401,7 +385,7 @@ struct Saver {
 // IO to be certain.Set the status=kFound and value_found=false to let the
 // caller know that key may exist but is not there in memory
 static void MarkKeyMayExist(void* arg) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
+  version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
   s->state = kFound;
   if (s->value_found != nullptr) {
     *(s->value_found) = false;
@@ -410,7 +394,7 @@ static void MarkKeyMayExist(void* arg) {
 
 static bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
                       const Slice& v) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
+  version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
   MergeContext* merge_contex = s->merge_context;
   std::string merge_result; // temporary area for merge results later
 
@@ -538,7 +522,7 @@ void Version::Get(const ReadOptions& options,
   Slice user_key = k.user_key();
 
   assert(status->ok() || status->IsMergeInProgress());
-  Saver saver;
+  version_set::Saver saver;
   saver.state = status->ok()? kNotFound : kMerge;
   saver.ucmp = user_comparator_;
   saver.user_key = user_key;
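The Saver hunks above show the commit's named-namespace remedy: the struct keeps its file-local intent but gains a unique qualified name, so helpers from different files can coexist in one unity translation unit. A tiny compilable sketch of the pattern, with hypothetical names only:

// named_namespace_sketch.cc -- hypothetical illustration of the pattern used
// for version_set::Saver and merger:: in this diff; not rippled/RocksDB source.

// --- formerly "namespace { struct Saver ... }" in one .cc ---
namespace version_set_demo {
struct Saver { int state = 0; };
}  // namespace version_set_demo

// --- a different .cc can keep its own Saver without colliding ---
namespace table_demo {
struct Saver { double progress = 0.0; };
}  // namespace table_demo

// Call sites simply qualify the name, as MarkKeyMayExist/SaveValue do above.
int main() {
  version_set_demo::Saver a;
  table_demo::Saver b;
  a.state = 1;
  b.progress = 0.5;
  return (a.state == 1 && b.progress == 0.5) ? 0 : 1;
}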
@@ -45,7 +45,6 @@ namespace rocksdb {
 
 extern const std::string kHashIndexPrefixesBlock;
 extern const std::string kHashIndexPrefixesMetadataBlock;
-namespace {
 
 typedef BlockBasedTableOptions::IndexType IndexType;
 
@@ -335,8 +334,6 @@ Slice CompressBlock(const Slice& raw,
   return raw;
 }
 
-} // anonymous namespace
-
 // kBlockBasedTableMagicNumber was picked by running
 // echo rocksdb.table.block_based | sha1sum
 // and taking the leading 64 bits.
@@ -16,18 +16,10 @@
 
 namespace rocksdb {
 
-namespace {
-
-inline uint32_t Hash(const Slice& s) {
-  return rocksdb::Hash(s.data(), s.size(), 0);
-}
-
 inline uint32_t PrefixToBucket(const Slice& prefix, uint32_t num_buckets) {
-  return Hash(prefix) % num_buckets;
+  return Hash(prefix.data(), prefix.size(), 0) % num_buckets;
 }
 
 
 // The prefix block index is simply a bucket array, with each entry pointing to
 // the blocks that span the prefixes hashed to this bucket.
 //
@@ -74,8 +66,6 @@ struct PrefixRecord {
   PrefixRecord* next;
 };
 
-} // anonymous namespace
-
 class BlockPrefixIndex::Builder {
  public:
   explicit Builder(const SliceTransform* internal_prefix_extractor)
@@ -23,7 +23,7 @@
 #include "util/autovector.h"
 
 namespace rocksdb {
-namespace {
+namespace merger {
 typedef std::priority_queue<
     IteratorWrapper*,
     std::vector<IteratorWrapper*>,
@@ -43,7 +43,7 @@ MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
 MinIterHeap NewMinIterHeap(const Comparator* comparator) {
   return MinIterHeap(MinIteratorComparator(comparator));
 }
-} // namespace
+} // namespace merger
 
 const size_t kNumIterReserve = 4;
 
@@ -56,8 +56,8 @@ class MergingIterator : public Iterator {
         current_(nullptr),
         use_heap_(true),
         direction_(kForward),
-        maxHeap_(NewMaxIterHeap(comparator_)),
-        minHeap_(NewMinIterHeap(comparator_)) {
+        maxHeap_(merger::NewMaxIterHeap(comparator_)),
+        minHeap_(merger::NewMinIterHeap(comparator_)) {
     children_.resize(n);
     for (int i = 0; i < n; i++) {
       children_[i].Set(children[i]);
@@ -274,8 +274,8 @@ class MergingIterator : public Iterator {
    kReverse
  };
  Direction direction_;
-  MaxIterHeap maxHeap_;
-  MinIterHeap minHeap_;
+  merger::MaxIterHeap maxHeap_;
+  merger::MinIterHeap minHeap_;
 };
 
 void MergingIterator::FindSmallest() {
@@ -302,8 +302,8 @@ void MergingIterator::FindLargest() {
 
 void MergingIterator::ClearHeaps() {
   use_heap_ = true;
-  maxHeap_ = NewMaxIterHeap(comparator_);
-  minHeap_ = NewMinIterHeap(comparator_);
+  maxHeap_ = merger::NewMaxIterHeap(comparator_);
+  minHeap_ = merger::NewMinIterHeap(comparator_);
 }
 
 Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,
@@ -15,9 +15,6 @@
 namespace rocksdb {
 
 namespace {
-static uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
 
 class BloomFilterPolicy : public FilterPolicy {
  private:
@@ -14,9 +14,6 @@
 namespace rocksdb {
 
 namespace {
-static uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
 
 uint32_t GetTotalBitsForLocality(uint32_t total_bits) {
   uint32_t num_blocks =
@@ -17,4 +17,8 @@ namespace rocksdb {
 
 extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
 
+inline uint32_t BloomHash(const Slice& key) {
+  return Hash(key.data(), key.size(), 0xbc9f1d34);
+}
+
 }
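One final hedged sketch, with hypothetical stand-ins for Slice and Hash rather than the real headers: defining a small hash helper inline in a single header is what lets many .cc files, and a unity TU that concatenates them, all share one definition instead of per-file static copies.

// bloom_hash_sketch.cc -- self-contained, hypothetical stand-ins mirroring the
// shape of the inline BloomHash added to the header in the last hunk above.
#include <cstdint>
#include <iostream>
#include <string>

struct Slice {                       // stand-in for the real Slice type
  const char* data_;
  size_t size_;
  const char* data() const { return data_; }
  size_t size() const { return size_; }
};

// Stand-in hash with the same signature as the declared Hash(data, n, seed).
inline uint32_t Hash(const char* data, size_t n, uint32_t seed) {
  uint32_t h = seed;
  for (size_t i = 0; i < n; i++) h = h * 31 + static_cast<unsigned char>(data[i]);
  return h;
}

// Because this is inline and lives in one header, every includer (or a unity
// TU that includes many of them) sees a single consistent definition instead
// of duplicated static copies that would collide in the merged TU.
inline uint32_t BloomHash(const Slice& key) {
  return Hash(key.data(), key.size(), 0xbc9f1d34);
}

int main() {
  std::string k = "user_key_1";
  Slice s{k.data(), k.size()};
  std::cout << BloomHash(s) << "\n";
  return 0;
}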