#include <xrpld/nodestore/detail/codec.h>
#include <xrpld/unity/rocksdb.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/clock/basic_seconds_clock.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/beast/rfc2616.h>
#include <xrpl/beast/unit_test.h>

#include <boost/beast/core/string.hpp>
#include <boost/regex.hpp>
#include <nudb/create.hpp>
#include <nudb/detail/format.hpp>
#include <nudb/xxhasher.hpp>
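// Formatting helpers from the detail namespace: pretty_time streams a
// duration using the most readable unit (nanoseconds through minutes), and
// fmtdur returns that rendering as a string for use in log messages.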
template <class Rep, class Period>
std::ostream&
pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
{
    // ...
    os << round<nanoseconds>(d).count();
    // ...
    os << round<microseconds>(d).count();
    // ...
    os << round<milliseconds>(d).count();
    // ...
    os << round<seconds>(d).count();
    // ...
    os << round<minutes>(d).count();
}
template <class Period, class Rep>
std::string
fmtdur(std::chrono::duration<Period, Rep> const& d)
{
    // ...
}
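// From progress::operator()(Log& log, std::size_t work): periodically logs
// the estimated time remaining, derived from the elapsed time per unit of
// completed work.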
auto const elapsed = now - start_;
// ...
auto const rate = elapsed.count() / double(work);
// ...
log << "Remaining: " << detail::fmtdur(remain) << " (" << work << " of "
    // ...
static boost::regex const re1(
    // ...
    "([a-zA-Z][_a-zA-Z0-9]*)"
    // ...
    boost::regex_constants::optimize);
// ...
auto const v = beast::rfc2616::split(s.begin(), s.end(), ',');
for (auto const& kv : v)
{
    boost::smatch m;
    if (!boost::regex_match(kv, m, re1))
        Throw<std::runtime_error>("invalid parameter " + kv);
    // ...
        Throw<std::runtime_error>("duplicate parameter " + m[1]);
}
#if RIPPLE_ROCKSDB_AVAILABLE

// ...

using namespace nudb;
using namespace nudb::detail;
auto const args = parse_args(arg());
bool usage = args.empty();

if (!usage && args.find("from") == args.end())
{
    log << "Missing parameter: from";
    // ...
}
if (!usage && args.find("to") == args.end())
{
    log << "Missing parameter: to";
    // ...
}
if (!usage && args.find("buffer") == args.end())
{
    log << "Missing parameter: buffer";
    // ...
}
// ...
    << "--unittest-arg=from=<from>,to=<to>,buffer=<buffer>\n"
    << "from: RocksDB database to import from\n"
    << "to: NuDB database to import to\n"
    << "buffer: Buffer size (bigger is faster)\n"
    << "NuDB database must not already exist.";
auto const from_path = args.at("from");
auto const to_path = args.at("to");

using hash_type = nudb::xxhasher;
auto const bulk_size = 64 * 1024 * 1024;
float const load_factor = 0.5;

auto const dp = to_path + ".dat";
auto const kp = to_path + ".key";
// ...
log << "from: " << from_path
    // ...
rocksdb::Options options;
options.create_if_missing = false;
options.max_open_files = 2000;
rocksdb::DB* pdb = nullptr;
rocksdb::Status status =
    rocksdb::DB::OpenForReadOnly(options, from_path, &pdb);
if (!status.ok())
    Throw<std::runtime_error>(
        "Can't open '" + from_path + "': " + status.ToString());
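// First pass: create the NuDB data file (.dat) and append every record to
// it through a bulk writer, starting with the data file header.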
dh.version = currentVersion;
// ...
df.create(file_mode::append, dp, ec);
if (ec)
    Throw<nudb::system_error>(ec);
bulk_writer<native_file> dw(df, 0, bulk_size);
// ...
auto os = dw.prepare(dat_file_header::size, ec);
if (ec)
    Throw<nudb::system_error>(ec);
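// Scan the source database with a RocksDB iterator, with checksum
// verification and block caching turned off for the one-shot sequential read.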
rocksdb::ReadOptions options;
options.verify_checksums = false;
options.fill_cache = false;
// ...
for (it->SeekToFirst(); it->Valid(); it->Next())
{
    if (it->key().size() != 32)
        Throw<std::runtime_error>(
            "Unexpected key size " +
            std::to_string(it->key().size()));
    void const* const key = it->key().data();
    void const* const data = it->value().data();
    auto const size = it->value().size();
    // ...
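    // Each value is compressed with the nodeobject codec and immediately
    // decompressed again as a round-trip check before the data record
    // (size, key, payload) is appended to the data file.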
    // ...
    BEAST_EXPECT(check.second == size);
    // ...
    auto os = dw.prepare(
        field<uint48_t>::size +
            // ...
        ec);
    if (ec)
        Throw<nudb::system_error>(ec);
    write<uint48_t>(os, out.second);
    // ...
}
// ...
    Throw<nudb::system_error>(ec);

log << "Import data: "
    // ...
auto const df_size = df.size(ec);
if (ec)
    Throw<nudb::system_error>(ec);
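// Second phase: build the key file (.key). The header records the salt,
// pepper, block size, and load factor; the bucket count is derived from the
// number of items and the bucket capacity, and the modulus is the next
// power of two at or above the bucket count.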
kh.version = currentVersion;
// ...
kh.appnum = dh.appnum;
// ...
kh.salt = make_salt();
kh.pepper = pepper<hash_type>(kh.salt);
kh.block_size = block_size(kp);
kh.load_factor = std::min<std::size_t>(65536.0 * load_factor, 65535);
kh.buckets =
    std::ceil(nitems / (bucket_capacity(kh.block_size) * load_factor));
kh.modulus = ceil_pow2(kh.buckets);
// ...
kf.create(file_mode::append, kp, ec);
if (ec)
    Throw<nudb::system_error>(ec);
buffer buf(kh.block_size);
// ...
ostream os(buf.get(), kh.block_size);
// ...
kf.write(0, buf.get(), kh.block_size, ec);
if (ec)
    Throw<nudb::system_error>(ec);
// ...
auto const buckets =
    std::max<std::size_t>(1, buffer_size / kh.block_size);
buf.reserve(buckets * kh.block_size);
auto const passes = (kh.buckets + buckets - 1) / buckets;
log << "items: " << nitems
    // ...
progress p(df_size * passes);
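// The key file is filled in passes: each pass holds a contiguous range of
// buckets in memory, re-reads the entire data file, and inserts only the
// keys whose bucket index falls inside the buffered range, spilling
// overfull buckets back into the data file.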
for (std::size_t b0 = 0; b0 < kh.buckets; b0 += buckets)
{
    auto const b1 = std::min(b0 + buckets, kh.buckets);
    // ...
    auto const bn = b1 - b0;
    // ...
    bucket b(kh.block_size, buf.get() + i * kh.block_size, empty);
    // ...
    bulk_reader<native_file> r(
        df, dat_file_header::size, df_size, bulk_size);
    // ...
    auto const offset = r.offset();
    // ...
    auto is = r.prepare(field<uint48_t>::size, ec);
    if (ec)
        Throw<nudb::system_error>(ec);
    read<uint48_t>(is, size);
    // ...
        Throw<nudb::system_error>(ec);
    // ...
    auto const n = bucket_index(h, kh.buckets, kh.modulus);
    p(log, npass * df_size + r.offset());
    if (n < b0 || n >= b1)
        continue;
    bucket b(
        kh.block_size, buf.get() + (n - b0) * kh.block_size);
    maybe_spill(b, dw, ec);
    if (ec)
        Throw<nudb::system_error>(ec);
    b.insert(offset, size, h);
    // ...
    is = r.prepare(field<std::uint16_t>::size, ec);
    if (ec)
        Throw<nudb::system_error>(ec);
    read<std::uint16_t>(is, size);
    // ...
        Throw<nudb::system_error>(ec);
    // ...
    kf.write(
        (b0 + 1) * kh.block_size, buf.get(), bn * kh.block_size, ec);
    if (ec)
        Throw<nudb::system_error>(ec);
}
// ...
    Throw<nudb::system_error>(ec);
BEAST_DEFINE_TESTSUITE_MANUAL(import, NodeStore, ripple);

#endif
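// A sketch of how this manual suite might be invoked, assuming rippled's
// usual --unittest selector; the database paths and buffer size below are
// hypothetical, and the argument format follows the usage text above:
//
//   rippled --unittest=NodeStore.import \
//       --unittest-arg="from=/path/to/rocksdb,to=/path/to/nudb,buffer=67108864"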