Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 17:27:55 +00:00.
Update src/rocksdb2 to rocksdb-3.5.1:
Merge commit 'c168d54495d7d7b84639514f6443ad99b89ce996' into develop
@@ -37,11 +37,6 @@ echo "Running db sanity check with commits $commit_new and $commit_old."

echo "============================================================="
echo "Making build $commit_new"
git checkout $commit_new
if [ $? -ne 0 ]; then
    echo "[ERROR] Can't checkout $commit_new"
    exit 1
fi
makestuff
mv db_sanity_test new_db_sanity_test
echo "Creating db based on the new commit --- $commit_new"
@@ -49,11 +44,6 @@ echo "Creating db based on the new commit --- $commit_new"

echo "============================================================="
echo "Making build $commit_old"
git checkout $commit_old
if [ $? -ne 0 ]; then
    echo "[ERROR] Can't checkout $commit_old"
    exit 1
fi
makestuff
mv db_sanity_test old_db_sanity_test
echo "Creating db based on the old commit --- $commit_old"
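The two hunks above repeat the same checkout / build / rename sequence, once for $commit_new and once for $commit_old. As a reading aid only, here is a minimal bash sketch of that pattern folded into a helper; "makestuff" is assumed to be a build function defined elsewhere in the script, and "checkout_and_build" is a hypothetical name, not part of this change.

# Hypothetical helper capturing the repeated pattern from the hunks above;
# not part of the actual diff. Assumes "makestuff" is defined elsewhere.
checkout_and_build() {
  local commit="$1"   # e.g. "$commit_new" or "$commit_old"
  local target="$2"   # e.g. "new_db_sanity_test" or "old_db_sanity_test"
  git checkout "$commit"
  if [ $? -ne 0 ]; then
    echo "[ERROR] Can't checkout $commit"
    exit 1
  fi
  makestuff
  mv db_sanity_test "$target"
}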
@@ -1,205 +0,0 @@
#!/bin/bash
# REQUIRE: db_bench binary exists in the current directory

if [ $# -ne 1 ]; then
  echo "./benchmark.sh [bulkload/fillseq/overwrite/filluniquerandom/readrandom/readwhilewriting]"
  exit 0
fi

# size constants
K=1024
M=$((1024 * K))
G=$((1024 * M))

if [ -z $DB_DIR ]; then
  echo "DB_DIR is not defined"
  exit 0
fi

if [ -z $WAL_DIR ]; then
  echo "WAL_DIR is not defined"
  exit 0
fi

output_dir=${OUTPUT_DIR:-/tmp/}
if [ ! -d $output_dir ]; then
  mkdir -p $output_dir
fi

num_read_threads=${NUM_READ_THREADS:-16}
writes_per_second=${WRITES_PER_SEC:-$((80 * K))} # (only for readwhilewriting)
cache_size=$((16 * G))
duration=${DURATION:-0}

num_keys=${NUM_KEYS:-$((1 * G))}
key_size=20
value_size=800

const_params="
  --db=$DB_DIR \
  --wal_dir=$WAL_DIR \
  \
  --num_levels=6 \
  --key_size=$key_size \
  --value_size=$value_size \
  --block_size=4096 \
  --cache_size=$cache_size \
  --cache_numshardbits=6 \
  --compression_type=snappy \
  --compression_ratio=0.5 \
  \
  --hard_rate_limit=2 \
  --rate_limit_delay_max_milliseconds=1000000 \
  --write_buffer_size=$((128 * M)) \
  --max_write_buffer_number=2 \
  --target_file_size_base=$((128 * M)) \
  --max_bytes_for_level_base=$((1 * G)) \
  \
  --sync=0 \
  --disable_data_sync=1 \
  --verify_checksum=1 \
  --delete_obsolete_files_period_micros=$((60 * M)) \
  --max_grandparent_overlap_factor=10 \
  \
  --statistics=1 \
  --stats_per_interval=1 \
  --stats_interval=$((1 * M)) \
  --histogram=1 \
  \
  --memtablerep=skip_list \
  --bloom_bits=10 \
  --open_files=$((20 * K))"

l0_config="
  --level0_file_num_compaction_trigger=8 \
  --level0_slowdown_writes_trigger=16 \
  --level0_stop_writes_trigger=24"

if [ $duration -gt 0 ]; then
  const_params="$const_params --duration=$duration"
fi

params_r="$const_params $l0_config --max_background_compactions=4 --max_background_flushes=1"
params_w="$const_params $l0_config --max_background_compactions=16 --max_background_flushes=16"
params_bulkload="$const_params --max_background_compactions=16 --max_background_flushes=16 \
  --level0_file_num_compaction_trigger=$((100 * M)) \
  --level0_slowdown_writes_trigger=$((100 * M)) \
  --level0_stop_writes_trigger=$((100 * M))"

function run_bulkload {
  echo "Bulk loading $num_keys random keys into database..."
  cmd="./db_bench $params_bulkload --benchmarks=fillrandom \
    --use_existing_db=0 \
    --num=$num_keys \
    --disable_auto_compactions=1 \
    --disable_data_sync=1 \
    --threads=1 2>&1 | tee $output_dir/benchmark_bulkload_fillrandom.log"
  echo $cmd | tee $output_dir/benchmark_bulkload_fillrandom.log
  eval $cmd
  echo "Compacting..."
  cmd="./db_bench $params_w --benchmarks=compact \
    --use_existing_db=1 \
    --num=$num_keys \
    --disable_auto_compactions=1 \
    --disable_data_sync=1 \
    --threads=1 2>&1 | tee $output_dir/benchmark_bulkload_compact.log"
  echo $cmd | tee $output_dir/benchmark_bulkload_compact.log
  eval $cmd
}

function run_fillseq {
  echo "Loading $num_keys keys sequentially into database..."
  cmd="./db_bench $params_w --benchmarks=fillseq \
    --use_existing_db=0 \
    --num=$num_keys \
    --threads=1 2>&1 | tee $output_dir/benchmark_fillseq.log"
  echo $cmd | tee $output_dir/benchmark_fillseq.log
  eval $cmd
}

function run_overwrite {
  echo "Overwriting $num_keys keys in database..."
  cmd="./db_bench $params_w --benchmarks=overwrite \
    --use_existing_db=1 \
    --num=$num_keys \
    --threads=1 2>&1 | tee $output_dir/benchmark_overwrite.log"
  echo $cmd | tee $output_dir/benchmark_overwrite.log
  eval $cmd
}

function run_filluniquerandom {
  echo "Loading $num_keys unique keys randomly into database..."
  cmd="./db_bench $params_w --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --num=$num_keys \
    --threads=1 2>&1 | tee $output_dir/benchmark_filluniquerandom.log"
  echo $cmd | tee $output_dir/benchmark_filluniquerandom.log
  eval $cmd
}

function run_readrandom {
  echo "Reading $num_keys random keys from database..."
  cmd="./db_bench $params_r --benchmarks=readrandom \
    --use_existing_db=1 \
    --num=$num_keys \
    --threads=$num_read_threads \
    --disable_auto_compactions=1 \
    2>&1 | tee $output_dir/benchmark_readrandom.log"
  echo $cmd | tee $output_dir/benchmark_readrandom.log
  eval $cmd
}

function run_readwhilewriting {
  echo "Reading $num_keys random keys from database while writing..."
  cmd="./db_bench $params_r --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --num=$num_keys \
    --threads=$num_read_threads \
    --writes_per_second=$writes_per_second \
    2>&1 | tee $output_dir/benchmark_readwhilewriting.log"
  echo $cmd | tee $output_dir/benchmark_readwhilewriting.log
  eval $cmd
}

function now() {
  echo `date +"%s"`
}

report="$output_dir/report.txt"

# print start time
echo "===== Benchmark ====="

# Run!!!
IFS=',' read -a jobs <<< $1
for job in ${jobs[@]}; do
  echo "Start $job at `date`" | tee -a $report
  start=$(now)
  if [ $job = bulkload ]; then
    run_bulkload
  elif [ $job = fillseq ]; then
    run_fillseq
  elif [ $job = overwrite ]; then
    run_overwrite
  elif [ $job = filluniquerandom ]; then
    run_filluniquerandom
  elif [ $job = readrandom ]; then
    run_readrandom
  elif [ $job = readwhilewriting ]; then
    run_readwhilewriting
  else
    echo "unknown job $job"
    exit
  fi
  end=$(now)

  echo "Complete $job in $((end-start)) seconds" | tee -a $report
  if [[ $job = readrandom || $job = readwhilewriting ]]; then
    qps=$(grep "micros\/op" "$output_dir/benchmark_$job.log" | grep "ops\/sec" | awk '{print $5}')
    line=$(grep "rocksdb.db.get.micros" "$output_dir/benchmark_$job.log")
    p50=$(echo $line | awk '{print $7}')
    p99=$(echo $line | awk '{print $13}')
    echo "Read latency p50 = $p50 us, p99 = $p99 us" | tee -a $report
    echo "QPS = $qps ops/sec" | tee -a $report
  fi
done
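The deleted benchmark.sh above is driven entirely by environment variables (DB_DIR, WAL_DIR, OUTPUT_DIR, NUM_KEYS, NUM_READ_THREADS, DURATION, WRITES_PER_SEC) plus a comma-separated job list in $1. A minimal usage sketch, assuming db_bench sits next to the script; the paths and sizes below are placeholders.

# Illustrative invocation of the removed benchmark.sh; paths and sizes are
# placeholders. $1 is split on commas into a list of jobs to run in order.
DB_DIR=/data/rocksdb WAL_DIR=/data/rocksdb-wal OUTPUT_DIR=/tmp/bench \
NUM_KEYS=$((100 * 1024 * 1024)) NUM_READ_THREADS=16 DURATION=3600 \
./benchmark.sh fillseq,readrandom
# Per-job output lands in $OUTPUT_DIR/benchmark_<job>.log; report.txt in the
# same directory collects QPS and p50/p99 read latency for the read jobs.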
@@ -8,15 +8,14 @@
#include <vector>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/env.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "rocksdb/comparator.h"
#include "rocksdb/table.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/filter_policy.h"
#include "include/rocksdb/db.h"
#include "include/rocksdb/options.h"
#include "include/rocksdb/env.h"
#include "include/rocksdb/slice.h"
#include "include/rocksdb/status.h"
#include "include/rocksdb/comparator.h"
#include "include/rocksdb/table.h"
#include "include/rocksdb/slice_transform.h"

namespace rocksdb {
@@ -50,7 +49,7 @@ class SanityTest {
        return s;
      }
    }
    return db->Flush(FlushOptions());
    return Status::OK();
  }
  Status Verify() {
    DB* db;
@@ -147,29 +146,13 @@ class SanityTestPlainTableFactory : public SanityTest {
  Options options_;
};

class SanityTestBloomFilter : public SanityTest {
 public:
  explicit SanityTestBloomFilter(const std::string& path) : SanityTest(path) {
    BlockBasedTableOptions table_options;
    table_options.filter_policy.reset(NewBloomFilterPolicy(10));
    options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
  }
  ~SanityTestBloomFilter() {}
  virtual Options GetOptions() const { return options_; }
  virtual std::string Name() const { return "BloomFilter"; }

 private:
  Options options_;
};

namespace {
bool RunSanityTests(const std::string& command, const std::string& path) {
  std::vector<SanityTest*> sanity_tests = {
      new SanityTestBasic(path),
      new SanityTestSpecialComparator(path),
      new SanityTestZlibCompression(path),
      new SanityTestPlainTableFactory(path),
      new SanityTestBloomFilter(path)};
      new SanityTestPlainTableFactory(path)};

  if (command == "create") {
    fprintf(stderr, "Creating...\n");
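For context, RunSanityTests above takes a command string and a database path, and this hunk changes which table-format variants appear in the test list. Based only on the usage pattern visible here, the db_sanity_test binary is presumably driven with a path plus a command such as "create"; the exact argument order and the "verify" counterpart are assumptions, not confirmed by this hunk.

# Assumed db_sanity_test usage, inferred from RunSanityTests(command, path);
# the path is a placeholder and the argument order is not confirmed here.
./db_sanity_test /tmp/sanity_check_db create   # write the reference database
./db_sanity_test /tmp/sanity_check_db verify   # re-open it and verify contents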
@@ -209,9 +209,6 @@ static const bool FLAGS_reopen_dummy __attribute__((unused)) =
DEFINE_int32(bloom_bits, 10, "Bloom filter bits per key. "
             "Negative means use default settings.");

DEFINE_bool(use_block_based_filter, false, "use block based filter"
            "instead of full filter for block based table");

DEFINE_string(db, "", "Use the db with the following name.");

DEFINE_bool(verify_checksum, false,
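The flags in this hunk are ordinary gflags definitions, so they map one-to-one onto db_stress command-line options. A small invocation sketch using only the flags that appear on both sides of the hunk (use_block_based_filter appears on only one side, so it is left out); the database path is a placeholder.

# Example db_stress run exercising the flags defined above; /tmp/stress_db is
# a placeholder path.
./db_stress --db=/tmp/stress_db --bloom_bits=10 --verify_checksum=true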
@@ -760,10 +757,8 @@ class StressTest {
              ? NewLRUCache(FLAGS_compressed_cache_size)
              : nullptr),
        filter_policy_(FLAGS_bloom_bits >= 0
                           ? FLAGS_use_block_based_filter
                                 ? NewBloomFilterPolicy(FLAGS_bloom_bits, true)
                                 : NewBloomFilterPolicy(FLAGS_bloom_bits, false)
                           : nullptr),
                           ? NewBloomFilterPolicy(FLAGS_bloom_bits)
                           : nullptr),
        db_(nullptr),
        new_column_family_name_(1),
        num_times_reopened_(0) {
@@ -76,7 +76,6 @@ Status ReduceLevelTest::OpenDB(bool create_if_missing, int num_levels,
  opt.num_levels = num_levels;
  opt.create_if_missing = create_if_missing;
  opt.max_mem_compaction_level = mem_table_compact_level;
  opt.max_background_flushes = 0;
  rocksdb::Status st = rocksdb::DB::Open(opt, dbname_, &db_);
  if (!st.ok()) {
    fprintf(stderr, "Can't open the db:%s\n", st.ToString().c_str());
@@ -1,45 +0,0 @@
#!/bin/bash
# REQUIRE: benchmark.sh exists in the current directory
# After execution of this script, log files are generated in $output_dir.
# report.txt provides high-level statistics.

# Size constants
K=1024
M=$((1024 * K))
G=$((1024 * M))

n=$((1 * G))
wps=$((80 * K))
duration=$((6 * 60 * 60))
num_read_threads=24

# Update these parameters before execution !!!
db_dir="/tmp/rocksdb/"
wal_dir="/tmp/rocksdb/"
output_dir="/tmp/output"

# Test 1: bulk load
OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
  ./benchmark.sh bulkload

# Test 2: sequential fill
OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
  ./benchmark.sh fillseq

# Test 3: overwrite
OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
  ./benchmark.sh overwrite

# Prepare: populate DB with random data
OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
  ./benchmark.sh filluniquerandom

# Test 4: random read
OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
  DURATION=$duration NUM_READ_THREADS=$num_read_threads \
  ./benchmark.sh readrandom

# Test 5: random read while writing
OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
  DURATION=$duration NUM_READ_THREADS=$num_read_threads WRITES_PER_SECOND=$wps \
  ./benchmark.sh readwhilewriting
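The deleted run_flash_bench.sh chains the benchmark.sh jobs with directories hard-coded under "# Update these parameters before execution !!!". Note that it exports WRITES_PER_SECOND for the readwhilewriting step while benchmark.sh reads WRITES_PER_SEC, so as written the write rate falls back to benchmark.sh's default. A minimal sketch of the same sequence taking the directories from the environment instead; FLASH_DB_DIR, FLASH_WAL_DIR and FLASH_OUT_DIR are hypothetical names.

# Sketch only: same job sequence, but directories come from the environment
# (the FLASH_* names are hypothetical) and the write-rate variable uses the
# name benchmark.sh actually reads (WRITES_PER_SEC).
K=1024; G=$((K * K * K))
n=$((1 * G)); wps=$((80 * K)); duration=$((6 * 60 * 60)); num_read_threads=24
db_dir=${FLASH_DB_DIR:-/tmp/rocksdb/}
wal_dir=${FLASH_WAL_DIR:-/tmp/rocksdb/}
output_dir=${FLASH_OUT_DIR:-/tmp/output}

for job in bulkload fillseq overwrite filluniquerandom; do
  OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
    ./benchmark.sh $job
done
for job in readrandom readwhilewriting; do
  OUTPUT_DIR=$output_dir NUM_KEYS=$n DB_DIR=$db_dir WAL_DIR=$wal_dir \
    DURATION=$duration NUM_READ_THREADS=$num_read_threads WRITES_PER_SEC=$wps \
    ./benchmark.sh $job
done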
@@ -68,7 +68,6 @@ class SstFileReader {
  // options_ and internal_comparator_ will also be used in
  // ReadSequential internally (specifically, seek-related operations)
  Options options_;
  const ImmutableCFOptions ioptions_;
  InternalKeyComparator internal_comparator_;
  unique_ptr<TableProperties> table_properties_;
};
@@ -77,8 +76,7 @@ SstFileReader::SstFileReader(const std::string& file_path,
                             bool verify_checksum,
                             bool output_hex)
    :file_name_(file_path), read_num_(0), verify_checksum_(verify_checksum),
     output_hex_(output_hex), ioptions_(options_),
     internal_comparator_(BytewiseComparator()) {
     output_hex_(output_hex), internal_comparator_(BytewiseComparator()) {
  fprintf(stdout, "Process %s\n", file_path.c_str());

  init_result_ = NewTableReader(file_name_);
@@ -125,7 +123,7 @@ Status SstFileReader::NewTableReader(const std::string& file_path) {

  if (s.ok()) {
    s = options_.table_factory->NewTableReader(
        ioptions_, soptions_, internal_comparator_, std::move(file_), file_size,
        options_, soptions_, internal_comparator_, std::move(file_), file_size,
        &table_reader_);
  }
  return s;
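The verify_checksum and output_hex parameters threaded through SstFileReader here back the command-line switches of the sst_dump tool. As a usage sketch only: the SST path is a placeholder and the flag set reflects sst_dump's usual options rather than anything this diff confirms.

# Assumed sst_dump invocations over a single table file; the .sst path is a
# placeholder and the flags are not confirmed by this diff.
./sst_dump --file=/path/to/000123.sst --command=scan --output_hex
./sst_dump --file=/path/to/000123.sst --command=check --verify_checksum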