#include <xrpld/nodestore/detail/codec.h>
#include <xrpld/unity/rocksdb.h>

#include <xrpl/basics/contract.h>
#include <xrpl/beast/clock/basic_seconds_clock.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/beast/rfc2616.h>
#include <xrpl/beast/unit_test.h>

#include <boost/beast/core/string.hpp>
#include <boost/regex.hpp>

#include <nudb/create.hpp>
#include <nudb/detail/format.hpp>
#include <nudb/xxhasher.hpp>
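// Manual unit test that bulk-imports a rippled RocksDB node store into a
// NuDB database: node objects are compressed and appended to a ".dat" data
// file, then a ".key" file of hash buckets is built over it in one or more
// passes.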
// Stream a duration using the coarsest unit that keeps it readable
// (the branch selection between units is elided here).
template <class Rep, class Period>
std::ostream&
pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
{
    using namespace std::chrono;
    // ...
    os << round<nanoseconds>(d).count();
    // ...
    os << round<microseconds>(d).count();
    // ...
    os << round<milliseconds>(d).count();
    // ...
    os << round<seconds>(d).count();
    // ...
    os << round<minutes>(d).count();
    // ...
}
// Format a duration as a short human readable string via pretty_time().
template <class Period, class Rep>
std::string
fmtdur(std::chrono::duration<Period, Rep> const& d)
{
    std::stringstream ss;
    pretty_time(ss, d);
    return ss.str();
}
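// For example, fmtdur(std::chrono::minutes(3)) takes the round<minutes>
// branch above, while sub-microsecond durations are printed in nanoseconds.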
// progress::operator(): log an estimate of the time remaining at intervals.
template <class Log>
void
operator()(Log& log, std::size_t work)
{
    // ...
    auto const elapsed = now - start_;
    // ...
    auto const rate = elapsed.count() / double(work);
    // ... compute `remain` from the unfinished work and the rate ...
    log << "Remaining: " << detail::fmtdur(remain) << " (" << work
        << " of " /* total, elapsed, ... */;
    // ...
}
// Parse a comma separated list of case-insensitive key=value parameters.
std::map<std::string, std::string, boost::beast::iless>
parse_args(std::string const& s)
{
    // Each parameter must match <key> '=' <value>.
    static boost::regex const re1(
        "([a-zA-Z][_a-zA-Z0-9]*)"  // identifier (key)
        /* ... '=' and value portions of the pattern ... */,
        boost::regex_constants::optimize);
    std::map<std::string, std::string, boost::beast::iless> map;
    auto const v = beast::rfc2616::split(s.begin(), s.end(), ',');
    for (auto const& kv : v)
    {
        boost::smatch m;
        if (!boost::regex_match(kv, m, re1))
            Throw<std::runtime_error>("invalid parameter " + kv);
        auto const result = map.emplace(m[1], m[2]);
        if (!result.second)
            Throw<std::runtime_error>("duplicate parameter " + m[1]);
    }
    return map;
}
#if RIPPLE_ROCKSDB_AVAILABLE
    // Test body: read the parameters passed via --unittest-arg.
    using namespace nudb;
    using namespace nudb::detail;

    auto const args = parse_args(arg());
    bool usage = args.empty();

    if (!usage && args.find("from") == args.end())
    {
        log << "Missing parameter: from";
        usage = true;
    }
    if (!usage && args.find("to") == args.end())
    {
        log << "Missing parameter: to";
        usage = true;
    }
    if (!usage && args.find("buffer") == args.end())
    {
        log << "Missing parameter: buffer";
        usage = true;
    }

    if (usage)
    {
        log << "Usage:\n"
            << "--unittest-arg=from=<from>,to=<to>,buffer=<buffer>\n"
            << "from: RocksDB database to import from\n"
            << "to: NuDB database to import to\n"
            << "buffer: Buffer size (bigger is faster)\n"
            << "NuDB database must not already exist.";
        return;
    }
    auto const from_path = args.at("from");
    auto const to_path = args.at("to");

    using hash_type = nudb::xxhasher;
    auto const bulk_size = 64 * 1024 * 1024;
    float const load_factor = 0.5;

    // A NuDB database is a pair of files sharing one path prefix.
    auto const dp = to_path + ".dat";  // data file
    auto const kp = to_path + ".key";  // key file

    log << "from: " << from_path /* ... to, buffer ... */;
    // Open the source RocksDB database read-only.
    rocksdb::Options options;
    options.create_if_missing = false;
    options.max_open_files = 2000;
    rocksdb::DB* pdb = nullptr;
    auto const status =
        rocksdb::DB::OpenForReadOnly(options, from_path, &pdb);
    if (!status.ok() || !pdb)
        Throw<std::runtime_error>(
            "Can't open '" + from_path + "': " + status.ToString());
    // Write the data file header, then append one record per node object.
    dat_file_header dh;
    dh.version = currentVersion;
    // ... uid, appnum, key_size ...

    native_file df;
    error_code ec;
    df.create(file_mode::append, dp, ec);
    if (ec)
        Throw<nudb::system_error>(ec);
    bulk_writer<native_file> dw(df, 0, bulk_size);
    {
        auto os = dw.prepare(dat_file_header::size, ec);
        if (ec)
            Throw<nudb::system_error>(ec);
        write(os, dh);
    }
    std::size_t nitems = 0;
    {
        rocksdb::ReadOptions options;
        options.verify_checksums = false;
        options.fill_cache = false;
        std::unique_ptr<rocksdb::Iterator> it(pdb->NewIterator(options));
        for (it->SeekToFirst(); it->Valid(); it->Next())
        {
            if (it->key().size() != 32)
                Throw<std::runtime_error>(
                    "Unexpected key size " +
                    std::to_string(it->key().size()));
            void const* const key = it->key().data();
            void const* const data = it->value().data();
            auto const size = it->value().size();
            // ... compress `data` with nodeobject_compress() into `out`,
            // then decompress into `check` to verify the round trip ...
            BEAST_EXPECT(check.second == size);
            // Data record: size, 32-byte key, compressed value.
            auto os = dw.prepare(
                field<uint48_t>::size +  // Size
                    32 +                 // Key
                    out.second,          // Data
                ec);
            if (ec)
                Throw<nudb::system_error>(ec);
            write<uint48_t>(os, out.second);
            // ... write the key and the compressed value ...
            ++nitems;
        }
        dw.flush(ec);
        if (ec)
            Throw<nudb::system_error>(ec);
    }
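    // At this point the ".dat" file holds the header followed by one
    // compressed record per node object; the ".key" file of hash buckets
    // is built over it next.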
    log << "Import data: " /* elapsed time */;

    auto const df_size = df.size(ec);
    if (ec)
        Throw<nudb::system_error>(ec);

    // Create the key file header.
    key_file_header kh;
    kh.version = currentVersion;
    // ...
    kh.appnum = dh.appnum;
    // ...
    kh.salt = make_salt();
    kh.pepper = pepper<hash_type>(kh.salt);
    kh.block_size = block_size(kp);
    kh.load_factor = std::min<std::size_t>(65536.0 * load_factor, 65535);
    kh.buckets =
        std::ceil(nitems / (bucket_capacity(kh.block_size) * load_factor));
    kh.modulus = ceil_pow2(kh.buckets);
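    // The bucket count scales inversely with the load factor: at 0.5 the key
    // file allocates roughly twice as many entry slots as there are items,
    // and the modulus used by bucket_index() is rounded up to the next power
    // of two.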
    native_file kf;
    kf.create(file_mode::append, kp, ec);
    if (ec)
        Throw<nudb::system_error>(ec);

    // Write the key file header into block 0.
    buffer buf(kh.block_size);
    {
        ostream os(buf.get(), kh.block_size);
        write(os, kh);
        kf.write(0, buf.get(), kh.block_size, ec);
        if (ec)
            Throw<nudb::system_error>(ec);
    }

    // Buffer as many key file buckets per pass as the "buffer" argument
    // (buffer_size, in bytes, parsed earlier) allows; each pass rescans the
    // whole data file.
    auto const buckets =
        std::max<std::size_t>(1, buffer_size / kh.block_size);
    buf.reserve(buckets * kh.block_size);
    auto const passes = (kh.buckets + buckets - 1) / buckets;
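    // Example (hypothetical sizes): with a 256 MiB buffer and 4 KiB blocks,
    // 65536 buckets fit in memory per pass, so a store needing 1,000,000
    // buckets is built in ceil(1000000 / 65536) = 16 passes.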
    log << "items: " << nitems /* buckets, data size, passes ... */;

    // Total work for the progress meter: every pass reads the whole data file.
    progress p(df_size * passes);
    std::size_t npass = 0;
    for (std::size_t b0 = 0; b0 < kh.buckets; b0 += buckets)
    {
        // This pass buffers the buckets in the range [b0, b1).
        auto const b1 = std::min(b0 + buckets, kh.buckets);
        auto const bn = b1 - b0;
        // Start with empty buckets in the buffer.
        for (std::size_t i = 0; i < bn; ++i)
            bucket b(kh.block_size, buf.get() + i * kh.block_size, empty);
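        // The scan below rereads the entire data file and keeps only the
        // keys whose bucket index lands in [b0, b1), which is what makes a
        // small buffer correct but slower (more passes) than a large one.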
        bulk_reader<native_file> r(
            df, dat_file_header::size, df_size, bulk_size);
        while (!r.eof())
        {
            auto const offset = r.offset();
            // Each record begins with a uint48 size field; zero marks a
            // spill record rather than a data record.
            std::size_t size;
            auto is = r.prepare(field<uint48_t>::size, ec);
            if (ec)
                Throw<nudb::system_error>(ec);
            read<uint48_t>(is, size);
            if (size > 0)
            {
                // Data record: read the key and compute its hash `h` ...
                if (ec)
                    Throw<nudb::system_error>(ec);
                auto const n = bucket_index(h, kh.buckets, kh.modulus);
                p(log, npass * df_size + r.offset());
                if (n < b0 || n >= b1)
                    continue;
                bucket b(
                    kh.block_size, buf.get() + (n - b0) * kh.block_size);
                maybe_spill(b, dw, ec);
                if (ec)
                    Throw<nudb::system_error>(ec);
                b.insert(offset, size, h);
            }
            else
            {
                // Spill record: read its size and skip over it.
                is = r.prepare(field<std::uint16_t>::size, ec);
                if (ec)
                    Throw<nudb::system_error>(ec);
                read<std::uint16_t>(is, size);
                // ... skip `size` bytes ...
                if (ec)
                    Throw<nudb::system_error>(ec);
            }
        }
        // Write this pass's buckets to their slots in the key file.
        kf.write(
            (b0 + 1) * kh.block_size, buf.get(), bn * kh.block_size, ec);
        if (ec)
            Throw<nudb::system_error>(ec);
        ++npass;
    }
    // ...
    if (ec)
        Throw<nudb::system_error>(ec);
BEAST_DEFINE_TESTSUITE_MANUAL(import, NodeStore, ripple);

#endif  // RIPPLE_ROCKSDB_AVAILABLE
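// As a manual suite, this test is skipped by the default unit test run and
// executes only when selected explicitly (see the usage text above for the
// --unittest-arg parameters it expects).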