Use NuDB burst size and upgrade NuDB to version 2.0.5

This commit is contained in:
Miguel Portilla
2020-11-12 15:17:55 -05:00
committed by manojsdoshi
parent 0b4e34b03b
commit 8707c15b9c
18 changed files with 148 additions and 49 deletions

View File

@@ -12,7 +12,7 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
FetchContent_Declare(
nudb_src
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.3
GIT_TAG 2.0.5
)
FetchContent_GetProperties(nudb_src)
if(NOT nudb_src_POPULATED)
@@ -23,7 +23,7 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
ExternalProject_Add (nudb_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.3
GIT_TAG 2.0.5
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
TEST_COMMAND ""

View File

@@ -940,6 +940,8 @@ public:
std::unique_ptr<NodeStore::Database> source =
NodeStore::Manager::instance().make_Database(
"NodeStore.import",
megabytes(config_->getValueFor(
SizedItem::burstSize, boost::none)),
dummyScheduler,
0,
dummyRoot,

View File

@@ -259,6 +259,8 @@ SHAMapStoreImp::makeNodeStore(std::string const& name, std::int32_t readThreads)
{
db = NodeStore::Manager::instance().make_Database(
name,
megabytes(
app_.config().getValueFor(SizedItem::burstSize, boost::none)),
scheduler_,
readThreads,
app_.getJobQueue(),
@@ -562,7 +564,10 @@ SHAMapStoreImp::makeBackendRotating(std::string path)
section.set("path", newPath.string());
auto backend{NodeStore::Manager::instance().make_Backend(
section, scheduler_, app_.logs().journal(nodeStoreName_))};
section,
megabytes(app_.config().getValueFor(SizedItem::burstSize, boost::none)),
scheduler_,
app_.logs().journal(nodeStoreName_))};
backend->open();
return backend;
}

View File

@@ -57,7 +57,8 @@ enum class SizedItem : std::size_t {
hashNodeDBCache,
txnDBCache,
lgrDBCache,
openFinalLimit
openFinalLimit,
burstSize
};
// This entire derived class is deprecated.

View File

@@ -40,25 +40,27 @@
namespace ripple {
inline constexpr std::array<std::pair<SizedItem, std::array<int, 5>>, 12>
sizedItems{
{// FIXME: We should document each of these items, explaining exactly
// what
// they control and whether there exists an explicit config
// option that can be used to override the default.
{SizedItem::sweepInterval, {{10, 30, 60, 90, 120}}},
{SizedItem::treeCacheSize,
{{128000, 256000, 512000, 768000, 2048000}}},
{SizedItem::treeCacheAge, {{30, 60, 90, 120, 900}}},
{SizedItem::ledgerSize, {{32, 128, 256, 384, 768}}},
{SizedItem::ledgerAge, {{30, 90, 180, 240, 900}}},
{SizedItem::ledgerFetch, {{2, 3, 4, 5, 8}}},
{SizedItem::nodeCacheSize, {{16384, 32768, 131072, 262144, 524288}}},
{SizedItem::nodeCacheAge, {{60, 90, 120, 900, 1800}}},
{SizedItem::hashNodeDBCache, {{4, 12, 24, 64, 128}}},
{SizedItem::txnDBCache, {{4, 12, 24, 64, 128}}},
{SizedItem::lgrDBCache, {{4, 8, 16, 32, 128}}},
{SizedItem::openFinalLimit, {{8, 16, 32, 64, 128}}}}};
// The configurable node sizes are "tiny", "small", "medium", "large", "huge"
inline constexpr std::array<std::pair<SizedItem, std::array<int, 5>>, 13>
sizedItems{{
// FIXME: We should document each of these items, explaining exactly
// what
// they control and whether there exists an explicit config
// option that can be used to override the default.
{SizedItem::sweepInterval, {{10, 30, 60, 90, 120}}},
{SizedItem::treeCacheSize, {{128000, 256000, 512000, 768000, 2048000}}},
{SizedItem::treeCacheAge, {{30, 60, 90, 120, 900}}},
{SizedItem::ledgerSize, {{32, 128, 256, 384, 768}}},
{SizedItem::ledgerAge, {{30, 90, 180, 240, 900}}},
{SizedItem::ledgerFetch, {{2, 3, 4, 5, 8}}},
{SizedItem::nodeCacheSize, {{16384, 32768, 131072, 262144, 524288}}},
{SizedItem::nodeCacheAge, {{60, 90, 120, 900, 1800}}},
{SizedItem::hashNodeDBCache, {{4, 12, 24, 64, 128}}},
{SizedItem::txnDBCache, {{4, 12, 24, 64, 128}}},
{SizedItem::lgrDBCache, {{4, 8, 16, 32, 128}}},
{SizedItem::openFinalLimit, {{8, 16, 32, 64, 128}}},
{SizedItem::burstSize, {{4, 8, 16, 32, 48}}},
}};
// Ensure that the order of entries in the table corresponds to the
// order of entries in the enum:

View File

@@ -42,6 +42,7 @@ public:
@param keyBytes The fixed number of bytes per key.
@param parameters A set of key/value configuration pairs.
@param burstSize Backend burst size in bytes.
@param scheduler The scheduler to use for running tasks.
@return A pointer to the Backend object.
*/
@@ -49,6 +50,7 @@ public:
createInstance(
size_t keyBytes,
Section const& parameters,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) = 0;
@@ -56,6 +58,7 @@ public:
@param keyBytes The fixed number of bytes per key.
@param parameters A set of key/value configuration pairs.
@param burstSize Backend burst size in bytes.
@param scheduler The scheduler to use for running tasks.
@param context The context used by database.
@return A pointer to the Backend object.
@@ -64,6 +67,7 @@ public:
createInstance(
size_t keyBytes,
Section const& parameters,
std::size_t burstSize,
Scheduler& scheduler,
nudb::context& context,
beast::Journal journal)

View File

@@ -60,6 +60,7 @@ public:
virtual std::unique_ptr<Backend>
make_Backend(
Section const& parameters,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) = 0;
@@ -81,6 +82,7 @@ public:
thrown.
@param name A diagnostic label for the database.
@param burstSize Backend burst size in bytes.
@param scheduler The scheduler to use for performing asynchronous tasks.
@param readThreads The number of async read threads to create
@param backendParameters The parameter string for the persistent
@@ -93,6 +95,7 @@ public:
virtual std::unique_ptr<Database>
make_Database(
std::string const& name,
std::size_t burstSize,
Scheduler& scheduler,
int readThreads,
Stoppable& parent,
@@ -106,6 +109,7 @@ public:
std::unique_ptr<Backend>
make_Backend(
Section const& config,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal);

View File

@@ -55,6 +55,7 @@ public:
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override;
@@ -227,6 +228,7 @@ std::unique_ptr<Backend>
MemoryFactory::createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t,
Scheduler& scheduler,
beast::Journal journal)
{

View File

@@ -42,6 +42,7 @@ public:
beast::Journal const j_;
size_t const keyBytes_;
std::size_t const burstSize_;
std::string const name_;
nudb::store db_;
std::atomic<bool> deletePath_;
@@ -50,10 +51,12 @@ public:
NuDBBackend(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal)
: j_(journal)
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get<std::string>(keyValues, "path"))
, deletePath_(false)
, scheduler_(scheduler)
@@ -66,11 +69,13 @@ public:
NuDBBackend(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
nudb::context& context,
beast::Journal journal)
: j_(journal)
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get<std::string>(keyValues, "path"))
, db_(context)
, deletePath_(false)
@@ -130,6 +135,7 @@ public:
Throw<nudb::system_error>(ec);
if (db_.appnum() != currentType)
Throw<std::runtime_error>("nodestore: unknown appnum");
db_.set_burst(burstSize_);
}
bool
@@ -333,23 +339,25 @@ public:
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override
{
return std::make_unique<NuDBBackend>(
keyBytes, keyValues, scheduler, journal);
keyBytes, keyValues, burstSize, scheduler, journal);
}
std::unique_ptr<Backend>
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
nudb::context& context,
beast::Journal journal) override
{
return std::make_unique<NuDBBackend>(
keyBytes, keyValues, scheduler, context, journal);
keyBytes, keyValues, burstSize, scheduler, context, journal);
}
};

View File

@@ -136,7 +136,12 @@ public:
}
std::unique_ptr<Backend>
createInstance(size_t, Section const&, Scheduler&, beast::Journal) override
createInstance(
size_t,
Section const&,
std::size_t,
Scheduler&,
beast::Journal) override
{
return std::make_unique<NullBackend>();
}

View File

@@ -450,6 +450,7 @@ public:
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t,
Scheduler& scheduler,
beast::Journal journal) override
{

View File

@@ -43,6 +43,7 @@ ManagerImp::missing_backend()
std::unique_ptr<Backend>
ManagerImp::make_Backend(
Section const& parameters,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal)
{
@@ -55,19 +56,20 @@ ManagerImp::make_Backend(
missing_backend();
return factory->createInstance(
NodeObject::keyBytes, parameters, scheduler, journal);
NodeObject::keyBytes, parameters, burstSize, scheduler, journal);
}
std::unique_ptr<Database>
ManagerImp::make_Database(
std::string const& name,
std::size_t burstSize,
Scheduler& scheduler,
int readThreads,
Stoppable& parent,
Section const& config,
beast::Journal journal)
{
auto backend{make_Backend(config, scheduler, journal)};
auto backend{make_Backend(config, burstSize, scheduler, journal)};
backend->open();
return std::make_unique<DatabaseNodeImp>(
name,
@@ -124,10 +126,12 @@ Manager::instance()
std::unique_ptr<Backend>
make_Backend(
Section const& config,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal)
{
return Manager::instance().make_Backend(config, scheduler, journal);
return Manager::instance().make_Backend(
config, burstSize, scheduler, journal);
}
} // namespace NodeStore

View File

@@ -54,12 +54,14 @@ public:
std::unique_ptr<Backend>
make_Backend(
Section const& parameters,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override;
std::unique_ptr<Database>
make_Database(
std::string const& name,
std::size_t burstSize,
Scheduler& scheduler,
int readThreads,
Stoppable& parent,

View File

@@ -115,7 +115,12 @@ Shard::init(Scheduler& scheduler, nudb::context& context)
return false;
}
backend_ = factory->createInstance(
NodeObject::keyBytes, section, scheduler, context, j_);
NodeObject::keyBytes,
section,
megabytes(app_.config().getValueFor(SizedItem::burstSize, boost::none)),
scheduler,
context,
j_);
return open(lock);
}

View File

@@ -58,8 +58,8 @@ public:
{
// Open the backend
std::unique_ptr<Backend> backend =
Manager::instance().make_Backend(params, scheduler, journal);
std::unique_ptr<Backend> backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
// Write the batch
@@ -83,8 +83,8 @@ public:
{
// Re-open the backend
std::unique_ptr<Backend> backend =
Manager::instance().make_Backend(params, scheduler, journal);
std::unique_ptr<Backend> backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
// Read it back in

View File

@@ -462,7 +462,13 @@ public:
// Write to source db
{
std::unique_ptr<Database> src = Manager::instance().make_Database(
"test", scheduler, 2, parent, srcParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
srcParams,
journal_);
storeBatch(*src, batch);
}
@@ -471,7 +477,13 @@ public:
{
// Re-open the db
std::unique_ptr<Database> src = Manager::instance().make_Database(
"test", scheduler, 2, parent, srcParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
srcParams,
journal_);
// Set up the destination database
beast::temp_dir dest_db;
@@ -480,7 +492,13 @@ public:
destParams.set("path", dest_db.path());
std::unique_ptr<Database> dest = Manager::instance().make_Database(
"test", scheduler, 2, parent, destParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
destParams,
journal_);
testcase(
"import into '" + destBackendType + "' from '" +
@@ -528,7 +546,13 @@ public:
{
// Open the database
std::unique_ptr<Database> db = Manager::instance().make_Database(
"test", scheduler, 2, parent, nodeParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
nodeParams,
journal_);
// Write the batch
storeBatch(*db, batch);
@@ -553,7 +577,13 @@ public:
{
// Re-open the database without the ephemeral DB
std::unique_ptr<Database> db = Manager::instance().make_Database(
"test", scheduler, 2, parent, nodeParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
nodeParams,
journal_);
// Read it back in
Batch copy;
@@ -572,7 +602,13 @@ public:
// Verify default earliest ledger sequence
std::unique_ptr<Database> db =
Manager::instance().make_Database(
"test", scheduler, 2, parent, nodeParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
nodeParams,
journal_);
BEAST_EXPECT(
db->earliestLedgerSeq() == XRP_LEDGER_EARLIEST_SEQ);
}
@@ -583,7 +619,13 @@ public:
nodeParams.set("earliest_seq", "0");
std::unique_ptr<Database> db =
Manager::instance().make_Database(
"test", scheduler, 2, parent, nodeParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
nodeParams,
journal_);
}
catch (std::runtime_error const& e)
{
@@ -596,7 +638,13 @@ public:
nodeParams.set("earliest_seq", "1");
std::unique_ptr<Database> db =
Manager::instance().make_Database(
"test", scheduler, 2, parent, nodeParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
nodeParams,
journal_);
// Verify database uses the earliest ledger sequence setting
BEAST_EXPECT(db->earliestLedgerSeq() == 1);
@@ -610,7 +658,13 @@ public:
"earliest_seq", std::to_string(XRP_LEDGER_EARLIEST_SEQ));
std::unique_ptr<Database> db2 =
Manager::instance().make_Database(
"test", scheduler, 2, parent, nodeParams, journal_);
"test",
megabytes(4),
scheduler,
2,
parent,
nodeParams,
journal_);
}
catch (std::runtime_error const& e)
{

View File

@@ -261,7 +261,7 @@ public:
beast::Journal journal)
{
DummyScheduler scheduler;
auto backend = make_Backend(config, scheduler, journal);
auto backend = make_Backend(config, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend != nullptr);
backend->open();
@@ -318,7 +318,7 @@ public:
beast::Journal journal)
{
DummyScheduler scheduler;
auto backend = make_Backend(config, scheduler, journal);
auto backend = make_Backend(config, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend != nullptr);
backend->open();
@@ -389,7 +389,7 @@ public:
beast::Journal journal)
{
DummyScheduler scheduler;
auto backend = make_Backend(config, scheduler, journal);
auto backend = make_Backend(config, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend != nullptr);
backend->open();
@@ -462,7 +462,7 @@ public:
beast::Journal journal)
{
DummyScheduler scheduler;
auto backend = make_Backend(config, scheduler, journal);
auto backend = make_Backend(config, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend != nullptr);
backend->open();
@@ -551,7 +551,7 @@ public:
do_work(Section const& config, Params const& params, beast::Journal journal)
{
DummyScheduler scheduler;
auto backend = make_Backend(config, scheduler, journal);
auto backend = make_Backend(config, megabytes(4), scheduler, journal);
BEAST_EXPECT(backend != nullptr);
backend->setDeletePath();
backend->open();

View File

@@ -61,7 +61,7 @@ public:
testSection.set("type", "memory");
testSection.set("Path", "SHAMap_test");
db_ = NodeStore::Manager::instance().make_Database(
"test", scheduler_, 1, parent_, testSection, j);
"test", megabytes(4), scheduler_, 1, parent_, testSection, j);
}
NodeStore::Database&