Improve online_delete configuration and DB tuning:

* Document delete_batch, back_off_milliseconds, age_threshold_seconds.
* Convert those time values to chrono types.
* Fix bug that ignored age_threshold_seconds.
* Add a "recovery buffer" to the config that gives the node a chance to
  recover before aborting online delete.
* Add begin/end log messages around the SQL queries.
* Add a new configuration section: [sqlite] to allow tuning the sqlite
  database operations. A startup warning is logged on full/large history
  servers if less-safe settings are configured.
* Update documentation of [node_db] and [sqlite] in the
  rippled-example.cfg file.

Resolves #3321
Edward Hennis
2020-05-11 16:48:34 -04:00
committed by Nik Bougalis
parent 00702f28c2
commit 4702c8b591
21 changed files with 1086 additions and 271 deletions

View File

@@ -36,7 +36,7 @@
# For more information on where the rippled server instance searches for the # For more information on where the rippled server instance searches for the
# file, visit: # file, visit:
# #
# https://developers.ripple.com/commandline-usage.html#generic-options # https://xrpl.org/commandline-usage.html#generic-options
# #
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, # This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
# or Mac style end of lines. Blank lines and lines beginning with '#' are # or Mac style end of lines. Blank lines and lines beginning with '#' are
@@ -869,18 +869,65 @@
# #
# These keys are possible for any type of backend: # These keys are possible for any type of backend:
# #
# earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate
# networks may set this value. Minimum value of 1.
# If a [shard_db] section is defined, and this
# value is present in either [node_db] or [shard_db],
# it must be defined with the same value in both
# sections.
#
# online_delete Minimum value of 256. Enable automatic purging # online_delete Minimum value of 256. Enable automatic purging
# of older ledger information. Maintain at least this # of older ledger information. Maintain at least this
# number of ledger records online. Must be greater # number of ledger records online. Must be greater
# than or equal to ledger_history. # than or equal to ledger_history.
# #
# advisory_delete 0 for disabled, 1 for enabled. If set, then # These keys modify the behavior of online_delete, and thus are only
# require administrative RPC call "can_delete" # relevant if online_delete is defined and non-zero:
# to enable online deletion of ledger records.
# #
# earliest_seq The default is 32570 to match the XRP ledger # advisory_delete 0 for disabled, 1 for enabled. If set, the
# network's earliest allowed sequence. Alternate # administrative RPC call "can_delete" is required
# networks may set this value. Minimum value of 1. # to enable online deletion of ledger records.
# Online deletion does not run automatically if
# non-zero and the last deletion was on a ledger
# greater than the current "can_delete" setting.
# Default is 0.
#
# delete_batch When automatically purging, SQLite database
# records are deleted in batches. This value
# controls the maximum size of each batch. Larger
# batches keep the databases locked for more time,
# which may cause other functions to fall behind,
# and thus cause the node to lose sync.
# Default is 100.
#
# back_off_milliseconds
# Number of milliseconds to wait between
# online_delete batches to allow other functions
# to catch up.
# Default is 100.
#
# age_threshold_seconds
# The online delete process will only run if the
# latest validated ledger is younger than this
# number of seconds.
# Default is 60.
#
# recovery_wait_seconds
# The online delete process checks periodically
# that rippled is still in sync with the network,
# and that the validated ledger is less than
# 'age_threshold_seconds' old. By default, if it
# is not, the online delete process aborts and
# tries again later. If 'recovery_wait_seconds'
# is set and rippled is out of sync, but likely to
# recover quickly, then online delete will wait
# this number of seconds for rippled to get back
# into sync before it aborts.
# Set this value if the node is otherwise staying
# in sync, or recovering quickly, but the online
# delete process is unable to finish.
# Default is unset.
# #
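# As an illustration only (the values shown are the documented defaults;
# the recovery_wait_seconds value is arbitrary, since that key has no
# default), a [node_db] stanza using these keys might look like:
#
#   [node_db]
#   type=NuDB
#   path=/var/lib/rippled/db/nudb
#   online_delete=512
#   advisory_delete=0
#   delete_batch=100
#   back_off_milliseconds=100
#   age_threshold_seconds=60
#   recovery_wait_seconds=30
#
# With advisory_delete=1, an operator must periodically authorize deletion
# through the administrative "can_delete" command, e.g. (illustrative):
#
#   rippled can_delete <ledger_index>
#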
# Notes: # Notes:
# The 'node_db' entry configures the primary, persistent storage. # The 'node_db' entry configures the primary, persistent storage.
@@ -892,6 +939,12 @@
# [import_db] Settings for performing a one-time import (optional) # [import_db] Settings for performing a one-time import (optional)
# [database_path] Path to the book-keeping databases. # [database_path] Path to the book-keeping databases.
# #
# The server creates and maintains 4 to 5 bookkeeping SQLite databases in
# the 'database_path' location. If you omit this configuration setting,
# the server creates a directory called "db" located in the same place as
# your rippled.cfg file.
# Partial pathnames are relative to the location of the rippled executable.
#
# [shard_db] Settings for the Shard Database (optional) # [shard_db] Settings for the Shard Database (optional)
# #
# Format (without spaces): # Format (without spaces):
@@ -907,12 +960,84 @@
# #
# max_size_gb Maximum disk space the database will utilize (in gigabytes) # max_size_gb Maximum disk space the database will utilize (in gigabytes)
# #
# [sqlite] Tuning settings for the SQLite databases (optional)
# #
# There are 4 bookkeeping SQLite database that the server creates and # Format (without spaces):
# maintains. If you omit this configuration setting, it will default to # One or more lines of case-insensitive key / value pairs:
# creating a directory called "db" located in the same place as your # <key> '=' <value>
# rippled.cfg file. Partial pathnames will be considered relative to # ...
# the location of the rippled executable. #
# Example 1:
# safety_level=low
#
# Example 2:
# journal_mode=off
# synchronous=off
#
# WARNING: These settings can have significant effects on data integrity,
# particularly in systemic failure scenarios. It is strongly recommended
# that they be left at their defaults unless the server is having
# performance issues during normal operation or during automatic purging
# (online_delete) operations. A warning will be logged on startup if
# 'ledger_history' is configured to store more than 10,000,000 ledgers and
# any of these settings are less safe than the default. This is due to the
# inordinate amount of time and bandwidth it will take to safely rebuild a
# corrupted database of that size from other peers.
#
# Optional keys:
#
# safety_level Valid values: high, low
# The default is "high", which tunes the SQLite
# databases in the most reliable mode, and is
# equivalent to:
# journal_mode=wal
# synchronous=normal
# temp_store=file
# "low" is equivalent to:
# journal_mode=memory
# synchronous=off
# temp_store=memory
# These "low" settings trade speed and reduced I/O
# for a higher risk of data loss. See the
# individual settings below for more information.
# This setting may not be combined with any of the
# other tuning settings: "journal_mode",
# "synchronous", or "temp_store".
#
# journal_mode Valid values: delete, truncate, persist, memory, wal, off
# The default is "wal", which uses a write-ahead
# log to implement database transactions.
# Alternately, "memory" saves disk I/O, but if
# rippled crashes during a transaction, the
# database is likely to be corrupted.
# See https://www.sqlite.org/pragma.html#pragma_journal_mode
# for more details about the available options.
# This setting may not be combined with the
# "safety_level" setting.
#
# synchronous Valid values: off, normal, full, extra
# The default is "normal", which works well with
# the "wal" journal mode. Alternatively, "off"
# allows rippled to continue as soon as data is
# passed to the OS, which can significantly
# increase speed, but risks data corruption if
# the host computer crashes before writing that
# data to disk.
# See https://www.sqlite.org/pragma.html#pragma_synchronous
# for more details about the available options.
# This setting may not be combined with the
# "safety_level" setting.
#
# temp_store Valid values: default, file, memory
# The default is "file", which will use files
# for temporary database tables and indices.
# Alternatively, "memory" may save I/O, but
# rippled does not currently use many, if any,
# of these temporary objects.
# See https://www.sqlite.org/pragma.html#pragma_temp_store
# for more details about the available options.
# This setting may not be combined with the
# "safety_level" setting.
# #
# #
# #
@@ -1212,24 +1337,27 @@ medium
# This is primary persistent datastore for rippled. This includes transaction # This is primary persistent datastore for rippled. This includes transaction
# metadata, account states, and ledger headers. Helpful information can be # metadata, account states, and ledger headers. Helpful information can be
# found here: https://ripple.com/wiki/NodeBackEnd # found at https://xrpl.org/capacity-planning.html#node-db-type
# delete old ledgers while maintaining at least 2000. Do not require an # type=NuDB is recommended for non-validators with fast SSDs. Validators or
# external administrative command to initiate deletion. # slow / spinning disks should use RocksDB. Caution: Spinning disks are
# not recommended. They do not perform well enough to consistently remain
# synced to the network.
# online_delete=512 is recommended to delete old ledgers while maintaining at
# least 512.
# advisory_delete=0 allows the online delete process to run automatically
# when the node has approximately two times the "online_delete" value of
# ledgers. No external administrative command is required to initiate
# deletion.
[node_db] [node_db]
type=RocksDB type=NuDB
path=/var/lib/rippled/db/rocksdb path=/var/lib/rippled/db/nudb
open_files=2000 online_delete=512
filter_bits=12
cache_mb=256
file_size_mb=8
file_size_mult=2
online_delete=2000
advisory_delete=0 advisory_delete=0
# This is the persistent datastore for shards. It is important for the health # This is the persistent datastore for shards. It is important for the health
# of the ripple network that rippled operators shard as much as practical. # of the ripple network that rippled operators shard as much as practical.
# NuDB requires SSD storage. Helpful information can be found here # NuDB requires SSD storage. Helpful information can be found at
# https://ripple.com/build/history-sharding # https://xrpl.org/history-sharding.html
#[shard_db] #[shard_db]
#path=/var/lib/rippled/db/shards/nudb #path=/var/lib/rippled/db/shards/nudb
#max_size_gb=500 #max_size_gb=500
@@ -1248,7 +1376,8 @@ time.apple.com
time.nist.gov time.nist.gov
pool.ntp.org pool.ntp.org
# To use the XRP test network (see https://ripple.com/build/xrp-test-net/), # To use the XRP test network
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
# use the following [ips] section: # use the following [ips] section:
# [ips] # [ips]
# r.altnet.rippletest.net 51235 # r.altnet.rippletest.net 51235

View File

@@ -228,14 +228,14 @@ Ledger::Ledger(
!txMap_->fetchRoot(SHAMapHash{info_.txHash}, nullptr)) !txMap_->fetchRoot(SHAMapHash{info_.txHash}, nullptr))
{ {
loaded = false; loaded = false;
JLOG(j.warn()) << "Don't have TX root for ledger"; JLOG(j.warn()) << "Don't have transaction root for ledger" << info_.seq;
} }
if (info_.accountHash.isNonZero() && if (info_.accountHash.isNonZero() &&
!stateMap_->fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) !stateMap_->fetchRoot(SHAMapHash{info_.accountHash}, nullptr))
{ {
loaded = false; loaded = false;
JLOG(j.warn()) << "Don't have AS root for ledger"; JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq;
} }
txMap_->setImmutable(); txMap_->setImmutable();

View File

@@ -1019,7 +1019,7 @@ public:
try try
{ {
auto const setup = setup_DatabaseCon(*config_); auto setup = setup_DatabaseCon(*config_, m_journal);
// transaction database // transaction database
mTxnDB = std::make_unique<DatabaseCon>( mTxnDB = std::make_unique<DatabaseCon>(
@@ -1069,6 +1069,7 @@ public:
mLedgerDB->setupCheckpointing(m_jobQueue.get(), logs()); mLedgerDB->setupCheckpointing(m_jobQueue.get(), logs());
// wallet database // wallet database
setup.useGlobalPragma = false;
mWalletDB = std::make_unique<DatabaseCon>( mWalletDB = std::make_unique<DatabaseCon>(
setup, setup,
WalletDBName, WalletDBName,
@@ -1360,7 +1361,7 @@ public:
JLOG(m_journal.fatal()) JLOG(m_journal.fatal())
<< "Free SQLite space for transaction db is less than " << "Free SQLite space for transaction db is less than "
"512MB. To fix this, rippled must be executed with the " "512MB. To fix this, rippled must be executed with the "
"vacuum <sqlitetmpdir> parameter before restarting. " "\"--vacuum\" parameter before restarting. "
"Note that this activity can take multiple days, " "Note that this activity can take multiple days, "
"depending on database size."; "depending on database size.";
signalStop(); signalStop();

View File

@@ -26,13 +26,23 @@ namespace ripple {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// These pragmas are built at startup and applied to all database
// connections, unless otherwise noted.
inline constexpr char const* CommonDBPragmaJournal{"PRAGMA journal_mode=%s;"};
inline constexpr char const* CommonDBPragmaSync{"PRAGMA synchronous=%s;"};
inline constexpr char const* CommonDBPragmaTemp{"PRAGMA temp_store=%s;"};
// A warning will be logged if any lower-safety sqlite tuning settings
// are used and at least this much ledger history is configured. This
// includes full history nodes. This is because such a large amount of
// data will be more difficult to recover if a rare failure occurs, and
// such failures are more likely with some of the other available tuning
// settings.
inline constexpr std::uint32_t SQLITE_TUNING_CUTOFF = 10'000'000;
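// Illustrative sketch (not part of this diff): at startup the templates
// above are filled in with the configured values via boost::format, e.g.
//   boost::str(boost::format(CommonDBPragmaJournal) % "wal")
// produces the string "PRAGMA journal_mode=wal;".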
// Ledger database holds ledgers and ledger confirmations // Ledger database holds ledgers and ledger confirmations
inline constexpr auto LgrDBName{"ledger.db"}; inline constexpr auto LgrDBName{"ledger.db"};
inline constexpr std::array<char const*, 3> LgrDBPragma{ inline constexpr std::array<char const*, 1> LgrDBPragma{
{"PRAGMA synchronous=NORMAL;", {"PRAGMA journal_size_limit=1582080;"}};
"PRAGMA journal_mode=WAL;",
"PRAGMA journal_size_limit=1582080;"}};
inline constexpr std::array<char const*, 5> LgrDBInit{ inline constexpr std::array<char const*, 5> LgrDBInit{
{"BEGIN TRANSACTION;", {"BEGIN TRANSACTION;",
@@ -61,22 +71,13 @@ inline constexpr std::array<char const*, 5> LgrDBInit{
// Transaction database holds transactions and public keys // Transaction database holds transactions and public keys
inline constexpr auto TxDBName{"transaction.db"}; inline constexpr auto TxDBName{"transaction.db"};
inline constexpr inline constexpr std::array TxDBPragma
#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP)
std::array<char const*, 6>
TxDBPragma
{ {
{ "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;",
#else "PRAGMA max_page_count=2147483646;",
std::array<char const*, 5> TxDBPragma {{
#endif
"PRAGMA page_size=4096;", "PRAGMA synchronous=NORMAL;",
"PRAGMA journal_mode=WAL;", "PRAGMA journal_size_limit=1582080;",
"PRAGMA max_page_count=2147483646;",
#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP)
"PRAGMA mmap_size=17179869184;" "PRAGMA mmap_size=17179869184;"
#endif #endif
}
}; };
inline constexpr std::array<char const*, 8> TxDBInit{ inline constexpr std::array<char const*, 8> TxDBInit{
@@ -115,10 +116,8 @@ inline constexpr std::array<char const*, 8> TxDBInit{
// Temporary database used with an incomplete shard that is being acquired // Temporary database used with an incomplete shard that is being acquired
inline constexpr auto AcquireShardDBName{"acquire.db"}; inline constexpr auto AcquireShardDBName{"acquire.db"};
inline constexpr std::array<char const*, 3> AcquireShardDBPragma{ inline constexpr std::array<char const*, 1> AcquireShardDBPragma{
{"PRAGMA synchronous=NORMAL;", {"PRAGMA journal_size_limit=1582080;"}};
"PRAGMA journal_mode=WAL;",
"PRAGMA journal_size_limit=1582080;"}};
inline constexpr std::array<char const*, 1> AcquireShardDBInit{ inline constexpr std::array<char const*, 1> AcquireShardDBInit{
{"CREATE TABLE IF NOT EXISTS Shard ( \ {"CREATE TABLE IF NOT EXISTS Shard ( \
@@ -130,6 +129,7 @@ inline constexpr std::array<char const*, 1> AcquireShardDBInit{
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Pragma for Ledger and Transaction databases with complete shards // Pragma for Ledger and Transaction databases with complete shards
// These override the CommonDBPragma values defined above.
inline constexpr std::array<char const*, 2> CompleteShardDBPragma{ inline constexpr std::array<char const*, 2> CompleteShardDBPragma{
{"PRAGMA synchronous=OFF;", "PRAGMA journal_mode=OFF;"}}; {"PRAGMA synchronous=OFF;", "PRAGMA journal_mode=OFF;"}};
@@ -172,6 +172,7 @@ inline constexpr std::array<char const*, 6> WalletDBInit{
static constexpr auto stateDBName{"state.db"}; static constexpr auto stateDBName{"state.db"};
// These override the CommonDBPragma values defined above.
static constexpr std::array<char const*, 2> DownloaderDBPragma{ static constexpr std::array<char const*, 2> DownloaderDBPragma{
{"PRAGMA synchronous=FULL;", "PRAGMA journal_mode=DELETE;"}}; {"PRAGMA synchronous=FULL;", "PRAGMA journal_mode=DELETE;"}};

View File

@@ -354,10 +354,7 @@ run(int argc, char** argv)
"nodetoshard", "Import node store into shards")( "nodetoshard", "Import node store into shards")(
"replay", "Replay a ledger close.")( "replay", "Replay a ledger close.")(
"start", "Start from a fresh Ledger.")( "start", "Start from a fresh Ledger.")(
"vacuum", "vacuum", "VACUUM the transaction db.")(
po::value<std::string>(),
"VACUUM the transaction db. Mandatory string argument specifies "
"temporary directory path.")(
"valid", "Consider the initial ledger a valid network ledger.")( "valid", "Consider the initial ledger a valid network ledger.")(
"validateShards", shardsText.c_str()); "validateShards", shardsText.c_str());
@@ -520,24 +517,22 @@ run(int argc, char** argv)
} }
using namespace boost::filesystem; using namespace boost::filesystem;
DatabaseCon::Setup dbSetup = setup_DatabaseCon(*config); DatabaseCon::Setup const dbSetup = setup_DatabaseCon(*config);
path dbPath = dbSetup.dataDir / TxDBName; path dbPath = dbSetup.dataDir / TxDBName;
path tmpPath = vm["vacuum"].as<std::string>();
try try
{ {
uintmax_t const dbSize = file_size(dbPath); uintmax_t const dbSize = file_size(dbPath);
assert(dbSize != static_cast<uintmax_t>(-1)); assert(dbSize != static_cast<uintmax_t>(-1));
if (space(tmpPath).available < dbSize) if (auto available = space(dbPath.parent_path()).available;
available < dbSize)
{ {
std::cerr << "A valid directory for vacuuming must be " std::cerr << "The database filesystem must have at least as "
"specified on a filesystem with at least " "much free space as the size of "
"as much free space as the size of "
<< dbPath.string() << ", which is " << dbSize << dbPath.string() << ", which is " << dbSize
<< " bytes. The filesystem for " << tmpPath.string() << " bytes. Only " << available
<< " only has " << space(tmpPath).available << " bytes are available.\n";
<< " bytes.\n";
return -1; return -1;
} }
@@ -546,16 +541,19 @@ run(int argc, char** argv)
auto& session = txnDB->getSession(); auto& session = txnDB->getSession();
std::uint32_t pageSize; std::uint32_t pageSize;
// Only the most trivial databases will fit in memory on typical
// (recommended) hardware. Force temp files to be written to disk
// regardless of the config settings.
session << boost::format(CommonDBPragmaTemp) % "file";
session << "PRAGMA page_size;", soci::into(pageSize); session << "PRAGMA page_size;", soci::into(pageSize);
std::cout << "VACUUM beginning. page_size: " << pageSize std::cout << "VACUUM beginning. page_size: " << pageSize
<< std::endl; << std::endl;
session << "PRAGMA journal_mode=OFF;";
session << "PRAGMA temp_store_directory=\"" << tmpPath.string()
<< "\";";
session << "VACUUM;"; session << "VACUUM;";
session << "PRAGMA journal_mode=WAL;"; assert(dbSetup.globalPragma);
for (auto const& p : *dbSetup.globalPragma)
session << p;
session << "PRAGMA page_size;", soci::into(pageSize); session << "PRAGMA page_size;", soci::into(pageSize);
std::cout << "VACUUM finished. page_size: " << pageSize std::cout << "VACUUM finished. page_size: " << pageSize

View File

@@ -2757,12 +2757,12 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
if (std::abs(closeOffset.count()) >= 60) if (std::abs(closeOffset.count()) >= 60)
l[jss::close_time_offset] = closeOffset.count(); l[jss::close_time_offset] = closeOffset.count();
constexpr std::chrono::seconds HIGH_AGE_THRESHOLD{1000000}; constexpr std::chrono::seconds highAgeThreshold{1000000};
if (m_ledgerMaster.haveValidated()) if (m_ledgerMaster.haveValidated())
{ {
auto const age = m_ledgerMaster.getValidatedLedgerAge(); auto const age = m_ledgerMaster.getValidatedLedgerAge();
l[jss::age] = l[jss::age] =
Json::UInt(age < HIGH_AGE_THRESHOLD ? age.count() : 0); Json::UInt(age < highAgeThreshold ? age.count() : 0);
} }
else else
{ {
@@ -2773,7 +2773,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
using namespace std::chrono_literals; using namespace std::chrono_literals;
auto age = closeTime - lCloseTime; auto age = closeTime - lCloseTime;
l[jss::age] = l[jss::age] =
Json::UInt(age < HIGH_AGE_THRESHOLD ? age.count() : 0); Json::UInt(age < highAgeThreshold ? age.count() : 0);
} }
} }
} }

View File

@@ -180,13 +180,24 @@ SHAMapStoreImp::SHAMapStoreImp(
section.set("filter_bits", "10"); section.set("filter_bits", "10");
} }
get_if_exists(section, "delete_batch", deleteBatch_);
get_if_exists(section, "backOff", backOff_);
get_if_exists(section, "age_threshold", ageThreshold_);
get_if_exists(section, "online_delete", deleteInterval_); get_if_exists(section, "online_delete", deleteInterval_);
if (deleteInterval_) if (deleteInterval_)
{ {
// Configuration that affects the behavior of online delete
get_if_exists(section, "delete_batch", deleteBatch_);
std::uint32_t temp;
if (get_if_exists(section, "back_off_milliseconds", temp) ||
// Included for backward compatibility with an undocumented setting
get_if_exists(section, "backOff", temp))
{
backOff_ = std::chrono::milliseconds{temp};
}
if (get_if_exists(section, "age_threshold_seconds", temp))
ageThreshold_ = std::chrono::seconds{temp};
if (get_if_exists(section, "recovery_wait_seconds", temp))
recoveryWaitTime_.emplace(std::chrono::seconds{temp});
get_if_exists(section, "advisory_delete", advisoryDelete_); get_if_exists(section, "advisory_delete", advisoryDelete_);
auto const minInterval = config.standalone() auto const minInterval = config.standalone()
@@ -348,23 +359,14 @@ SHAMapStoreImp::run()
// will delete up to (not including) lastRotated // will delete up to (not including) lastRotated
if (validatedSeq >= lastRotated + deleteInterval_ && if (validatedSeq >= lastRotated + deleteInterval_ &&
canDelete_ >= lastRotated - 1) canDelete_ >= lastRotated - 1 && !health())
{ {
JLOG(journal_.warn()) JLOG(journal_.warn())
<< "rotating validatedSeq " << validatedSeq << " lastRotated " << "rotating validatedSeq " << validatedSeq << " lastRotated "
<< lastRotated << " deleteInterval " << deleteInterval_ << lastRotated << " deleteInterval " << deleteInterval_
<< " canDelete_ " << canDelete_; << " canDelete_ " << canDelete_ << " state "
<< app_.getOPs().strOperatingMode(false) << " age "
switch (health()) << ledgerMaster_->getValidatedLedgerAge().count() << 's';
{
case Health::stopping:
stopped();
return;
case Health::unhealthy:
continue;
case Health::ok:
default:;
}
clearPrior(lastRotated); clearPrior(lastRotated);
switch (health()) switch (health())
@@ -378,27 +380,29 @@ SHAMapStoreImp::run()
default:; default:;
} }
JLOG(journal_.debug()) << "copying ledger " << validatedSeq;
std::uint64_t nodeCount = 0; std::uint64_t nodeCount = 0;
validatedLedger->stateMap().snapShot(false)->visitNodes(std::bind( validatedLedger->stateMap().snapShot(false)->visitNodes(std::bind(
&SHAMapStoreImp::copyNode, &SHAMapStoreImp::copyNode,
this, this,
std::ref(nodeCount), std::ref(nodeCount),
std::placeholders::_1)); std::placeholders::_1));
switch (health())
{
case Health::stopping:
stopped();
return;
case Health::unhealthy:
continue;
case Health::ok:
default:;
}
// Only log if we completed without a "health" abort
JLOG(journal_.debug()) << "copied ledger " << validatedSeq JLOG(journal_.debug()) << "copied ledger " << validatedSeq
<< " nodecount " << nodeCount; << " nodecount " << nodeCount;
switch (health())
{
case Health::stopping:
stopped();
return;
case Health::unhealthy:
continue;
case Health::ok:
default:;
}
JLOG(journal_.debug()) << "freshening caches";
freshenCaches(); freshenCaches();
JLOG(journal_.debug()) << validatedSeq << " freshened caches";
switch (health()) switch (health())
{ {
case Health::stopping: case Health::stopping:
@@ -409,7 +413,10 @@ SHAMapStoreImp::run()
case Health::ok: case Health::ok:
default:; default:;
} }
// Only log if we completed without a "health" abort
JLOG(journal_.debug()) << validatedSeq << " freshened caches";
JLOG(journal_.trace()) << "Making a new backend";
auto newBackend = makeBackendRotating(); auto newBackend = makeBackendRotating();
JLOG(journal_.debug()) JLOG(journal_.debug())
<< validatedSeq << " new backend " << newBackend->getName(); << validatedSeq << " new backend " << newBackend->getName();
@@ -559,26 +566,38 @@ SHAMapStoreImp::makeBackendRotating(std::string path)
return backend; return backend;
} }
bool void
SHAMapStoreImp::clearSql( SHAMapStoreImp::clearSql(
DatabaseCon& database, DatabaseCon& database,
LedgerIndex lastRotated, LedgerIndex lastRotated,
std::string const& minQuery, std::string const& minQuery,
std::string const& deleteQuery) std::string const& deleteQuery)
{ {
assert(deleteInterval_);
LedgerIndex min = std::numeric_limits<LedgerIndex>::max(); LedgerIndex min = std::numeric_limits<LedgerIndex>::max();
{ {
auto db = database.checkoutDb();
boost::optional<std::uint64_t> m; boost::optional<std::uint64_t> m;
*db << minQuery, soci::into(m); JLOG(journal_.trace())
<< "Begin: Look up lowest value of: " << minQuery;
{
auto db = database.checkoutDb();
*db << minQuery, soci::into(m);
}
JLOG(journal_.trace()) << "End: Look up lowest value of: " << minQuery;
if (!m) if (!m)
return false; return;
min = *m; min = *m;
} }
if (min > lastRotated || health() != Health::ok) if (min > lastRotated || health() != Health::ok)
return false; return;
if (min == lastRotated)
{
// Micro-optimization mainly to clarify logs
JLOG(journal_.trace()) << "Nothing to delete from " << deleteQuery;
return;
}
boost::format formattedDeleteQuery(deleteQuery); boost::format formattedDeleteQuery(deleteQuery);
@@ -587,17 +606,24 @@ SHAMapStoreImp::clearSql(
while (min < lastRotated) while (min < lastRotated)
{ {
min = std::min(lastRotated, min + deleteBatch_); min = std::min(lastRotated, min + deleteBatch_);
JLOG(journal_.trace()) << "Begin: Delete up to " << deleteBatch_
<< " rows with LedgerSeq < " << min
<< " using query: " << deleteQuery;
{ {
auto db = database.checkoutDb(); auto db = database.checkoutDb();
*db << boost::str(formattedDeleteQuery % min); *db << boost::str(formattedDeleteQuery % min);
} }
JLOG(journal_.trace())
<< "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < "
<< min << " using query: " << deleteQuery;
if (health()) if (health())
return true; return;
if (min < lastRotated) if (min < lastRotated)
std::this_thread::sleep_for(std::chrono::milliseconds(backOff_)); std::this_thread::sleep_for(backOff_);
if (health())
return;
} }
JLOG(journal_.debug()) << "finished: " << deleteQuery; JLOG(journal_.debug()) << "finished: " << deleteQuery;
return true;
} }
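// Illustrative only (table and column names here are hypothetical, not
// taken from this diff): clearSql() is invoked with a paired lookup/delete
// query, along the lines of
//   minQuery:    "SELECT MIN(LedgerSeq) FROM Ledgers;"
//   deleteQuery: "DELETE FROM Ledgers WHERE LedgerSeq < %u;"
// where the %u placeholder is filled in per batch via boost::format.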
void void
@@ -621,13 +647,14 @@ SHAMapStoreImp::freshenCaches()
void void
SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
{ {
if (health())
return;
// Do not allow ledgers to be acquired from the network // Do not allow ledgers to be acquired from the network
// that are about to be deleted. // that are about to be deleted.
minimumOnline_ = lastRotated + 1; minimumOnline_ = lastRotated + 1;
JLOG(journal_.trace()) << "Begin: Clear internal ledgers up to "
<< lastRotated;
ledgerMaster_->clearPriorLedgers(lastRotated); ledgerMaster_->clearPriorLedgers(lastRotated);
JLOG(journal_.trace()) << "End: Clear internal ledgers up to "
<< lastRotated;
if (health()) if (health())
return; return;
@@ -666,16 +693,32 @@ SHAMapStoreImp::health()
} }
if (!netOPs_) if (!netOPs_)
return Health::ok; return Health::ok;
assert(deleteInterval_);
constexpr static std::chrono::seconds age_threshold(60); if (healthy_)
auto age = ledgerMaster_->getValidatedLedgerAge();
OperatingMode mode = netOPs_->getOperatingMode();
if (mode != OperatingMode::FULL || age > age_threshold)
{ {
JLOG(journal_.warn()) << "Not deleting. state: " auto age = ledgerMaster_->getValidatedLedgerAge();
<< app_.getOPs().strOperatingMode(mode, false) OperatingMode mode = netOPs_->getOperatingMode();
<< ". age " << age.count() << 's'; if (recoveryWaitTime_ && mode == OperatingMode::SYNCING &&
healthy_ = false; age < ageThreshold_)
{
JLOG(journal_.warn())
<< "Waiting " << recoveryWaitTime_->count()
<< "s for node to get back into sync with network. state: "
<< app_.getOPs().strOperatingMode(mode, false) << ". age "
<< age.count() << 's';
std::this_thread::sleep_for(*recoveryWaitTime_);
age = ledgerMaster_->getValidatedLedgerAge();
mode = netOPs_->getOperatingMode();
}
if (mode != OperatingMode::FULL || age > ageThreshold_)
{
JLOG(journal_.warn()) << "Not deleting. state: "
<< app_.getOPs().strOperatingMode(mode, false)
<< ". age " << age.count() << 's';
healthy_ = false;
}
} }
if (healthy_) if (healthy_)

View File

@@ -25,6 +25,7 @@
#include <ripple/core/DatabaseCon.h> #include <ripple/core/DatabaseCon.h>
#include <ripple/nodestore/DatabaseRotating.h> #include <ripple/nodestore/DatabaseRotating.h>
#include <atomic> #include <atomic>
#include <chrono>
#include <condition_variable> #include <condition_variable>
#include <thread> #include <thread>
@@ -106,8 +107,14 @@ private:
std::uint32_t deleteInterval_ = 0; std::uint32_t deleteInterval_ = 0;
bool advisoryDelete_ = false; bool advisoryDelete_ = false;
std::uint32_t deleteBatch_ = 100; std::uint32_t deleteBatch_ = 100;
std::uint32_t backOff_ = 100; std::chrono::milliseconds backOff_{100};
std::int32_t ageThreshold_ = 60; std::chrono::seconds ageThreshold_{60};
/// If set, and the node is out of sync during an
/// online_delete health check, sleep the thread
/// for this time and check again so the node can
/// recover.
/// See also: "recovery_wait_seconds" in rippled-example.cfg
boost::optional<std::chrono::seconds> recoveryWaitTime_;
// these do not exist upon SHAMapStore creation, but do exist // these do not exist upon SHAMapStore creation, but do exist
// as of onPrepare() or before // as of onPrepare() or before
@@ -212,13 +219,11 @@ private:
return false; return false;
} }
/** delete from sqlite table in batches to not lock the db excessively /** delete from sqlite table in batches to not lock the db excessively.
* pause briefly to extend access time to other users * Pause briefly to extend access time to other users.
* call with mutex object unlocked * Call with mutex object unlocked.
* @return true if any deletable rows were found (though not
* necessarily deleted.
*/ */
bool void
clearSql( clearSql(
DatabaseCon& database, DatabaseCon& database,
LedgerIndex lastRotated, LedgerIndex lastRotated,
@@ -236,6 +241,9 @@ private:
// Assume that, once unhealthy, a necessary step has been // Assume that, once unhealthy, a necessary step has been
// aborted, so the online-delete process needs to restart // aborted, so the online-delete process needs to restart
// at next ledger. // at next ledger.
// If recoveryWaitTime_ is set, this may sleep to give rippled
// time to recover, so never call it from any thread other than
// the main "run()".
Health Health
health(); health();
// //

View File

@@ -24,6 +24,7 @@
#include <ripple/core/Config.h> #include <ripple/core/Config.h>
#include <ripple/core/SociDB.h> #include <ripple/core/SociDB.h>
#include <boost/filesystem/path.hpp> #include <boost/filesystem/path.hpp>
#include <boost/optional.hpp>
#include <mutex> #include <mutex>
#include <string> #include <string>
@@ -89,6 +90,19 @@ public:
Config::StartUpType startUp = Config::NORMAL; Config::StartUpType startUp = Config::NORMAL;
bool standAlone = false; bool standAlone = false;
boost::filesystem::path dataDir; boost::filesystem::path dataDir;
// Indicates whether or not to return the `globalPragma`
// from commonPragma()
bool useGlobalPragma = false;
std::vector<std::string> const*
commonPragma() const
{
assert(!useGlobalPragma || globalPragma);
return useGlobalPragma && globalPragma ? globalPragma.get()
: nullptr;
}
static std::unique_ptr<std::vector<std::string> const> globalPragma;
}; };
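// Minimal usage sketch (assumes a loaded Config `cfg` and a Journal `j`):
//
//   auto setup = setup_DatabaseCon(cfg, j);  // builds globalPragma once
//   DatabaseCon txDb(setup, TxDBName, TxDBPragma, TxDBInit);
//
// The delegating constructor applies setup.commonPragma() first, then the
// database-specific TxDBPragma statements.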
template <std::size_t N, std::size_t M> template <std::size_t N, std::size_t M>
@@ -97,16 +111,17 @@ public:
std::string const& DBName, std::string const& DBName,
std::array<char const*, N> const& pragma, std::array<char const*, N> const& pragma,
std::array<char const*, M> const& initSQL) std::array<char const*, M> const& initSQL)
{
// Use temporary files or regular DB files? // Use temporary files or regular DB files?
auto const useTempFiles = setup.standAlone && : DatabaseCon(
setup.startUp != Config::LOAD && setup.standAlone && setup.startUp != Config::LOAD &&
setup.startUp != Config::LOAD_FILE && setup.startUp != Config::LOAD_FILE &&
setup.startUp != Config::REPLAY; setup.startUp != Config::REPLAY
boost::filesystem::path pPath = ? ""
useTempFiles ? "" : (setup.dataDir / DBName); : (setup.dataDir / DBName),
setup.commonPragma(),
init(pPath, pragma, initSQL); pragma,
initSQL)
{
} }
template <std::size_t N, std::size_t M> template <std::size_t N, std::size_t M>
@@ -115,8 +130,8 @@ public:
std::string const& DBName, std::string const& DBName,
std::array<char const*, N> const& pragma, std::array<char const*, N> const& pragma,
std::array<char const*, M> const& initSQL) std::array<char const*, M> const& initSQL)
: DatabaseCon(dataDir / DBName, nullptr, pragma, initSQL)
{ {
init((dataDir / DBName), pragma, initSQL);
} }
soci::session& soci::session&
@@ -136,14 +151,22 @@ public:
private: private:
template <std::size_t N, std::size_t M> template <std::size_t N, std::size_t M>
void DatabaseCon(
init(
boost::filesystem::path const& pPath, boost::filesystem::path const& pPath,
std::vector<std::string> const* commonPragma,
std::array<char const*, N> const& pragma, std::array<char const*, N> const& pragma,
std::array<char const*, M> const& initSQL) std::array<char const*, M> const& initSQL)
{ {
open(session_, "sqlite", pPath.string()); open(session_, "sqlite", pPath.string());
if (commonPragma)
{
for (auto const& p : *commonPragma)
{
soci::statement st = session_.prepare << p;
st.execute(true);
}
}
for (auto const& p : pragma) for (auto const& p : pragma)
{ {
soci::statement st = session_.prepare << p; soci::statement st = session_.prepare << p;
@@ -163,7 +186,9 @@ private:
}; };
DatabaseCon::Setup DatabaseCon::Setup
setup_DatabaseCon(Config const& c); setup_DatabaseCon(
Config const& c,
boost::optional<beast::Journal> j = boost::none);
} // namespace ripple } // namespace ripple

View File

@@ -442,7 +442,8 @@ Config::loadFromString(std::string const& fileContents)
if (getSingleSection(secConfig, SECTION_LEDGER_HISTORY, strTemp, j_)) if (getSingleSection(secConfig, SECTION_LEDGER_HISTORY, strTemp, j_))
{ {
if (boost::iequals(strTemp, "full")) if (boost::iequals(strTemp, "full"))
LEDGER_HISTORY = 1000000000u; LEDGER_HISTORY =
std::numeric_limits<decltype(LEDGER_HISTORY)>::max();
else if (boost::iequals(strTemp, "none")) else if (boost::iequals(strTemp, "none"))
LEDGER_HISTORY = 0; LEDGER_HISTORY = 0;
else else
@@ -454,7 +455,7 @@ Config::loadFromString(std::string const& fileContents)
if (boost::iequals(strTemp, "none")) if (boost::iequals(strTemp, "none"))
FETCH_DEPTH = 0; FETCH_DEPTH = 0;
else if (boost::iequals(strTemp, "full")) else if (boost::iequals(strTemp, "full"))
FETCH_DEPTH = 1000000000u; FETCH_DEPTH = std::numeric_limits<decltype(FETCH_DEPTH)>::max();
else else
FETCH_DEPTH = beast::lexicalCastThrow<std::uint32_t>(strTemp); FETCH_DEPTH = beast::lexicalCastThrow<std::uint32_t>(strTemp);

View File

@@ -21,12 +21,14 @@
#include <ripple/basics/contract.h> #include <ripple/basics/contract.h>
#include <ripple/core/DatabaseCon.h> #include <ripple/core/DatabaseCon.h>
#include <ripple/core/SociDB.h> #include <ripple/core/SociDB.h>
#include <boost/algorithm/string.hpp>
#include <boost/format.hpp>
#include <memory> #include <memory>
namespace ripple { namespace ripple {
DatabaseCon::Setup DatabaseCon::Setup
setup_DatabaseCon(Config const& c) setup_DatabaseCon(Config const& c, boost::optional<beast::Journal> j)
{ {
DatabaseCon::Setup setup; DatabaseCon::Setup setup;
@@ -38,9 +40,134 @@ setup_DatabaseCon(Config const& c)
Throw<std::runtime_error>("database_path must be set."); Throw<std::runtime_error>("database_path must be set.");
} }
if (!setup.globalPragma)
{
setup.globalPragma = [&c, &j]() {
auto const& sqlite = c.section("sqlite");
auto result = std::make_unique<std::vector<std::string>>();
result->reserve(3);
// defaults
std::string safety_level;
std::string journal_mode = "wal";
std::string synchronous = "normal";
std::string temp_store = "file";
bool showRiskWarning = false;
if (set(safety_level, "safety_level", sqlite))
{
if (boost::iequals(safety_level, "low"))
{
// low safety defaults
journal_mode = "memory";
synchronous = "off";
temp_store = "memory";
showRiskWarning = true;
}
else if (!boost::iequals(safety_level, "high"))
{
Throw<std::runtime_error>(
"Invalid safety_level value: " + safety_level);
}
}
{
// #journal_mode Valid values : delete, truncate, persist,
// memory, wal, off
if (set(journal_mode, "journal_mode", sqlite) &&
!safety_level.empty())
{
Throw<std::runtime_error>(
"Configuration file may not define both "
"\"safety_level\" and \"journal_mode\"");
}
bool higherRisk = boost::iequals(journal_mode, "memory") ||
boost::iequals(journal_mode, "off");
showRiskWarning = showRiskWarning || higherRisk;
if (higherRisk || boost::iequals(journal_mode, "delete") ||
boost::iequals(journal_mode, "truncate") ||
boost::iequals(journal_mode, "persist") ||
boost::iequals(journal_mode, "wal"))
{
result->emplace_back(boost::str(
boost::format(CommonDBPragmaJournal) % journal_mode));
}
else
{
Throw<std::runtime_error>(
"Invalid journal_mode value: " + journal_mode);
}
}
{
//#synchronous Valid values : off, normal, full, extra
if (set(synchronous, "synchronous", sqlite) &&
!safety_level.empty())
{
Throw<std::runtime_error>(
"Configuration file may not define both "
"\"safety_level\" and \"synchronous\"");
}
bool higherRisk = boost::iequals(synchronous, "off");
showRiskWarning = showRiskWarning || higherRisk;
if (higherRisk || boost::iequals(synchronous, "normal") ||
boost::iequals(synchronous, "full") ||
boost::iequals(synchronous, "extra"))
{
result->emplace_back(boost::str(
boost::format(CommonDBPragmaSync) % synchronous));
}
else
{
Throw<std::runtime_error>(
"Invalid synchronous value: " + synchronous);
}
}
{
// #temp_store Valid values : default, file, memory
if (set(temp_store, "temp_store", sqlite) &&
!safety_level.empty())
{
Throw<std::runtime_error>(
"Configuration file may not define both "
"\"safety_level\" and \"temp_store\"");
}
bool higherRisk = boost::iequals(temp_store, "memory");
showRiskWarning = showRiskWarning || higherRisk;
if (higherRisk || boost::iequals(temp_store, "default") ||
boost::iequals(temp_store, "file"))
{
result->emplace_back(boost::str(
boost::format(CommonDBPragmaTemp) % temp_store));
}
else
{
Throw<std::runtime_error>(
"Invalid temp_store value: " + temp_store);
}
}
if (showRiskWarning && j && c.LEDGER_HISTORY > SQLITE_TUNING_CUTOFF)
{
JLOG(j->warn())
<< "reducing the data integrity guarantees from the "
"default [sqlite] behavior is not recommended for "
"nodes storing large amounts of history, because of the "
"difficulty inherent in rebuilding corrupted data.";
}
assert(result->size() == 3);
return result;
}();
}
setup.useGlobalPragma = true;
return setup; return setup;
} }
std::unique_ptr<std::vector<std::string> const>
DatabaseCon::Setup::globalPragma;
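// Setup::globalPragma is built at most once (by the first call to
// setup_DatabaseCon()), and every DatabaseCon subsequently constructed
// with useGlobalPragma set replays the same vector of PRAGMA statements.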
void void
DatabaseCon::setupCheckpointing(JobQueue* q, Logs& l) DatabaseCon::setupCheckpointing(JobQueue* q, Logs& l)
{ {

View File

@@ -50,7 +50,9 @@ DatabaseBody::value_type::open(
auto setup = setup_DatabaseCon(config); auto setup = setup_DatabaseCon(config);
setup.dataDir = path.parent_path(); setup.dataDir = path.parent_path();
setup.useGlobalPragma = false;
// Downloader ignores the "CommonPragma"
conn_ = std::make_unique<DatabaseCon>( conn_ = std::make_unique<DatabaseCon>(
setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit);

View File

@@ -124,6 +124,7 @@ Shard::open(Scheduler& scheduler, nudb::context& ctx)
setup.startUp = config.START_UP; setup.startUp = config.START_UP;
setup.standAlone = config.standalone(); setup.standAlone = config.standalone();
setup.dataDir = dir_; setup.dataDir = dir_;
setup.useGlobalPragma = true;
acquireInfo_->SQLiteDB = std::make_unique<DatabaseCon>( acquireInfo_->SQLiteDB = std::make_unique<DatabaseCon>(
setup, setup,
@@ -668,10 +669,14 @@ bool
Shard::initSQLite(std::lock_guard<std::recursive_mutex> const&) Shard::initSQLite(std::lock_guard<std::recursive_mutex> const&)
{ {
Config const& config{app_.config()}; Config const& config{app_.config()};
DatabaseCon::Setup setup; DatabaseCon::Setup const setup = [&]() {
setup.startUp = config.START_UP; DatabaseCon::Setup result;
setup.standAlone = config.standalone(); result.startUp = config.START_UP;
setup.dataDir = dir_; result.standAlone = config.standalone();
result.dataDir = dir_;
result.useGlobalPragma = !backendComplete_;
return result;
}();
try try
{ {

View File

@@ -27,6 +27,7 @@
#include <memory> #include <memory>
#include <sstream> #include <sstream>
#include <test/jtx.h> #include <test/jtx.h>
#include <test/jtx/CheckMessageLogs.h>
namespace ripple { namespace ripple {
namespace test { namespace test {
@@ -34,56 +35,6 @@ namespace test {
class LedgerHistory_test : public beast::unit_test::suite class LedgerHistory_test : public beast::unit_test::suite
{ {
public: public:
/** Log manager that searches for a specific message substring
*/
class CheckMessageLogs : public Logs
{
std::string msg_;
bool& found_;
class CheckMessageSink : public beast::Journal::Sink
{
CheckMessageLogs& owner_;
public:
CheckMessageSink(
beast::severities::Severity threshold,
CheckMessageLogs& owner)
: beast::Journal::Sink(threshold, false), owner_(owner)
{
}
void
write(beast::severities::Severity level, std::string const& text)
override
{
if (text.find(owner_.msg_) != std::string::npos)
owner_.found_ = true;
}
};
public:
/** Constructor
@param msg The message string to search for
@param found The variable to set to true if the message is found
*/
CheckMessageLogs(std::string msg, bool& found)
: Logs{beast::severities::kDebug}
, msg_{std::move(msg)}
, found_{found}
{
}
std::unique_ptr<beast::Journal::Sink>
makeSink(
std::string const& partition,
beast::severities::Severity threshold) override
{
return std::make_unique<CheckMessageSink>(threshold, *this);
}
};
/** Generate a new ledger by hand, applying a specific close time offset /** Generate a new ledger by hand, applying a specific close time offset
and optionally inserting a transaction. and optionally inserting a transaction.
@@ -149,7 +100,7 @@ public:
Env env{ Env env{
*this, *this,
envconfig(), envconfig(),
std::make_unique<CheckMessageLogs>("MISMATCH ", found)}; std::make_unique<CheckMessageLogs>("MISMATCH ", &found)};
LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()};
auto const genesis = makeLedger({}, env, lh, 0s); auto const genesis = makeLedger({}, env, lh, 0s);
uint256 const dummyTxHash{1}; uint256 const dummyTxHash{1};
@@ -166,7 +117,7 @@ public:
*this, *this,
envconfig(), envconfig(),
std::make_unique<CheckMessageLogs>( std::make_unique<CheckMessageLogs>(
"MISMATCH on close time", found)}; "MISMATCH on close time", &found)};
LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()};
auto const genesis = makeLedger({}, env, lh, 0s); auto const genesis = makeLedger({}, env, lh, 0s);
auto const ledgerA = makeLedger(genesis, env, lh, 4s); auto const ledgerA = makeLedger(genesis, env, lh, 4s);
@@ -186,7 +137,7 @@ public:
*this, *this,
envconfig(), envconfig(),
std::make_unique<CheckMessageLogs>( std::make_unique<CheckMessageLogs>(
"MISMATCH on prior ledger", found)}; "MISMATCH on prior ledger", &found)};
LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()};
auto const genesis = makeLedger({}, env, lh, 0s); auto const genesis = makeLedger({}, env, lh, 0s);
auto const ledgerA = makeLedger(genesis, env, lh, 4s); auto const ledgerA = makeLedger(genesis, env, lh, 4s);
@@ -212,7 +163,7 @@ public:
Env env{ Env env{
*this, *this,
envconfig(), envconfig(),
std::make_unique<CheckMessageLogs>(msg, found)}; std::make_unique<CheckMessageLogs>(msg, &found)};
LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()};
Account alice{"A1"}; Account alice{"A1"};

View File

@@ -256,6 +256,7 @@ public:
{ {
DatabaseCon::Setup setup; DatabaseCon::Setup setup;
setup.dataDir = getDatabasePath(); setup.dataDir = getDatabasePath();
BEAST_EXPECT(!setup.useGlobalPragma);
DatabaseCon dbCon( DatabaseCon dbCon(
setup, setup,
dbName.data(), dbName.data(),

View File

@@ -0,0 +1,80 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/Log.h>

#include <memory>
#include <sstream>
#include <string>
namespace ripple {
namespace test {
/**
* @brief Log manager for CaptureSinks. This class holds the stream
* instance that is written to by the sinks. Upon destruction, all
* contents of the stream are assigned to the string specified in the
* ctor
*/
class CaptureLogs : public Logs
{
std::stringstream strm_;
std::string* pResult_;
/**
* @brief sink for writing all log messages to a stringstream
*/
class CaptureSink : public beast::Journal::Sink
{
std::stringstream& strm_;
public:
CaptureSink(
beast::severities::Severity threshold,
std::stringstream& strm)
: beast::Journal::Sink(threshold, false), strm_(strm)
{
}
void
write(beast::severities::Severity level, std::string const& text)
override
{
strm_ << text;
}
};
public:
explicit CaptureLogs(std::string* pResult)
: Logs(beast::severities::kInfo), pResult_(pResult)
{
}
~CaptureLogs() override
{
*pResult_ = strm_.str();
}
std::unique_ptr<beast::Journal::Sink>
makeSink(
std::string const& partition,
beast::severities::Severity threshold) override
{
return std::make_unique<CaptureSink>(threshold, strm_);
}
};
} // namespace test
} // namespace ripple
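// Minimal usage sketch (a hypothetical test snippet, not from this diff):
//
//   std::string messages;
//   {
//       Env env{*this, envconfig(),
//               std::make_unique<CaptureLogs>(&messages)};
//       // ... exercise the Env ...
//   }
//   // Once the logs object is destroyed with the Env's Application,
//   // `messages` holds everything that was logged.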

View File

@@ -0,0 +1,75 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/Log.h>

#include <memory>
#include <string>
namespace ripple {
namespace test {
/** Log manager that searches for a specific message substring
*/
class CheckMessageLogs : public Logs
{
std::string msg_;
bool* pFound_;
class CheckMessageSink : public beast::Journal::Sink
{
CheckMessageLogs& owner_;
public:
CheckMessageSink(
beast::severities::Severity threshold,
CheckMessageLogs& owner)
: beast::Journal::Sink(threshold, false), owner_(owner)
{
}
void
write(beast::severities::Severity level, std::string const& text)
override
{
if (text.find(owner_.msg_) != std::string::npos)
*owner_.pFound_ = true;
}
};
public:
/** Constructor
@param msg The message string to search for
@param pFound Pointer to the variable to set to true if the message is
found
*/
CheckMessageLogs(std::string msg, bool* pFound)
: Logs{beast::severities::kDebug}, msg_{std::move(msg)}, pFound_{pFound}
{
}
std::unique_ptr<beast::Journal::Sink>
makeSink(
std::string const& partition,
beast::severities::Severity threshold) override
{
return std::make_unique<CheckMessageSink>(threshold, *this);
}
};
} // namespace test
} // namespace ripple

View File

@@ -27,6 +27,7 @@
#include <ripple/basics/Log.h> #include <ripple/basics/Log.h>
#include <ripple/basics/chrono.h> #include <ripple/basics/chrono.h>
#include <ripple/beast/cxx17/type_traits.h> // <type_traits> #include <ripple/beast/cxx17/type_traits.h> // <type_traits>
#include <ripple/beast/utility/Journal.h>
#include <ripple/core/Config.h> #include <ripple/core/Config.h>
#include <ripple/json/json_value.h> #include <ripple/json/json_value.h>
#include <ripple/json/to_string.h> #include <ripple/json/to_string.h>
@@ -131,7 +132,8 @@ private:
AppBundle( AppBundle(
beast::unit_test::suite& suite, beast::unit_test::suite& suite,
std::unique_ptr<Config> config, std::unique_ptr<Config> config,
std::unique_ptr<Logs> logs); std::unique_ptr<Logs> logs,
beast::severities::Severity thresh);
~AppBundle(); ~AppBundle();
}; };
@@ -163,12 +165,10 @@ public:
Env(beast::unit_test::suite& suite_, Env(beast::unit_test::suite& suite_,
std::unique_ptr<Config> config, std::unique_ptr<Config> config,
FeatureBitset features, FeatureBitset features,
std::unique_ptr<Logs> logs = nullptr) std::unique_ptr<Logs> logs = nullptr,
beast::severities::Severity thresh = beast::severities::kError)
: test(suite_) : test(suite_)
, bundle_( , bundle_(suite_, std::move(config), std::move(logs), thresh)
suite_,
std::move(config),
logs ? std::move(logs) : std::make_unique<SuiteLogs>(suite_))
, journal{bundle_.app->journal("Env")} , journal{bundle_.app->journal("Env")}
{ {
memoize(Account::master); memoize(Account::master);
@@ -211,11 +211,13 @@ public:
*/ */
Env(beast::unit_test::suite& suite_, Env(beast::unit_test::suite& suite_,
std::unique_ptr<Config> config, std::unique_ptr<Config> config,
std::unique_ptr<Logs> logs = nullptr) std::unique_ptr<Logs> logs = nullptr,
beast::severities::Severity thresh = beast::severities::kError)
: Env(suite_, : Env(suite_,
std::move(config), std::move(config),
supported_amendments(), supported_amendments(),
std::move(logs)) std::move(logs),
thresh)
{ {
} }

View File

@@ -59,12 +59,22 @@ namespace jtx {
Env::AppBundle::AppBundle( Env::AppBundle::AppBundle(
beast::unit_test::suite& suite, beast::unit_test::suite& suite,
std::unique_ptr<Config> config, std::unique_ptr<Config> config,
std::unique_ptr<Logs> logs) std::unique_ptr<Logs> logs,
beast::severities::Severity thresh)
: AppBundle() : AppBundle()
{ {
using namespace beast::severities; using namespace beast::severities;
// Use kFatal threshold to reduce noise from STObject. if (logs)
setDebugLogSink(std::make_unique<SuiteJournalSink>("Debug", kFatal, suite)); {
setDebugLogSink(logs->makeSink("Debug", kFatal));
}
else
{
logs = std::make_unique<SuiteLogs>(suite);
// Use kFatal threshold to reduce noise from STObject.
setDebugLogSink(
std::make_unique<SuiteJournalSink>("Debug", kFatal, suite));
}
auto timeKeeper_ = std::make_unique<ManualTimeKeeper>(); auto timeKeeper_ = std::make_unique<ManualTimeKeeper>();
timeKeeper = timeKeeper_.get(); timeKeeper = timeKeeper_.get();
// Hack so we don't have to call Config::setup // Hack so we don't have to call Config::setup
@@ -72,7 +82,7 @@ Env::AppBundle::AppBundle(
owned = make_Application( owned = make_Application(
std::move(config), std::move(logs), std::move(timeKeeper_)); std::move(config), std::move(logs), std::move(timeKeeper_));
app = owned.get(); app = owned.get();
app->logs().threshold(kError); app->logs().threshold(thresh);
if (!app->setup()) if (!app->setup())
Throw<std::runtime_error>("Env::AppBundle: setup failed"); Throw<std::runtime_error>("Env::AppBundle: setup failed");
timeKeeper->set(app->getLedgerMaster().getClosedLedger()->info().closeTime); timeKeeper->set(app->getLedgerMaster().getClosedLedger()->info().closeTime);

View File

@@ -18,8 +18,12 @@
//============================================================================== //==============================================================================
#include <ripple/beast/utility/temp_dir.h> #include <ripple/beast/utility/temp_dir.h>
#include <ripple/core/DatabaseCon.h>
#include <ripple/nodestore/DummyScheduler.h> #include <ripple/nodestore/DummyScheduler.h>
#include <ripple/nodestore/Manager.h> #include <ripple/nodestore/Manager.h>
#include <test/jtx.h>
#include <test/jtx/CheckMessageLogs.h>
#include <test/jtx/envconfig.h>
#include <test/nodestore/TestBase.h> #include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h> #include <test/unit_test/SuiteJournal.h>
@@ -35,6 +39,409 @@ public:
{ {
} }
void
testConfig()
{
testcase("Config");
using namespace ripple::test;
using namespace ripple::test::jtx;
auto const integrityWarning =
"reducing the data integrity guarantees from the "
"default [sqlite] behavior is not recommended for "
"nodes storing large amounts of history, because of the "
"difficulty inherent in rebuilding corrupted data.";
{
// defaults
Env env(*this);
auto const s = setup_DatabaseCon(env.app().config());
if (BEAST_EXPECT(s.globalPragma->size() == 3))
{
BEAST_EXPECT(
s.globalPragma->at(0) == "PRAGMA journal_mode=wal;");
BEAST_EXPECT(
s.globalPragma->at(1) == "PRAGMA synchronous=normal;");
BEAST_EXPECT(
s.globalPragma->at(2) == "PRAGMA temp_store=file;");
}
}
{
// High safety level
DatabaseCon::Setup::globalPragma.reset();
bool found = false;
Env env = [&]() {
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "high");
}
p->LEDGER_HISTORY = 100'000'000;
return Env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(
integrityWarning, &found),
beast::severities::kWarning);
}();
BEAST_EXPECT(!found);
auto const s = setup_DatabaseCon(env.app().config());
if (BEAST_EXPECT(s.globalPragma->size() == 3))
{
BEAST_EXPECT(
s.globalPragma->at(0) == "PRAGMA journal_mode=wal;");
BEAST_EXPECT(
s.globalPragma->at(1) == "PRAGMA synchronous=normal;");
BEAST_EXPECT(
s.globalPragma->at(2) == "PRAGMA temp_store=file;");
}
}
{
// Low safety level
DatabaseCon::Setup::globalPragma.reset();
bool found = false;
Env env = [&]() {
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "low");
}
p->LEDGER_HISTORY = 100'000'000;
return Env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(
integrityWarning, &found),
beast::severities::kWarning);
}();
BEAST_EXPECT(found);
auto const s = setup_DatabaseCon(env.app().config());
if (BEAST_EXPECT(s.globalPragma->size() == 3))
{
BEAST_EXPECT(
s.globalPragma->at(0) == "PRAGMA journal_mode=memory;");
BEAST_EXPECT(
s.globalPragma->at(1) == "PRAGMA synchronous=off;");
BEAST_EXPECT(
s.globalPragma->at(2) == "PRAGMA temp_store=memory;");
}
}
{
// Override individual settings
DatabaseCon::Setup::globalPragma.reset();
bool found = false;
Env env = [&]() {
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("journal_mode", "off");
section.set("synchronous", "extra");
section.set("temp_store", "default");
}
return Env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(
integrityWarning, &found),
beast::severities::kWarning);
}();
// No warning even though higher-risk settings were used, because
// LEDGER_HISTORY is small
BEAST_EXPECT(!found);
auto const s = setup_DatabaseCon(env.app().config());
if (BEAST_EXPECT(s.globalPragma->size() == 3))
{
BEAST_EXPECT(
s.globalPragma->at(0) == "PRAGMA journal_mode=off;");
BEAST_EXPECT(
s.globalPragma->at(1) == "PRAGMA synchronous=extra;");
BEAST_EXPECT(
s.globalPragma->at(2) == "PRAGMA temp_store=default;");
}
}
{
// Override individual settings with large history
DatabaseCon::Setup::globalPragma.reset();
bool found = false;
Env env = [&]() {
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("journal_mode", "off");
section.set("synchronous", "extra");
section.set("temp_store", "default");
}
p->LEDGER_HISTORY = 50'000'000;
return Env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(
integrityWarning, &found),
beast::severities::kWarning);
}();
// Warning fired: higher-risk settings were used and LEDGER_HISTORY
// is large
BEAST_EXPECT(found);
auto const s = setup_DatabaseCon(env.app().config());
if (BEAST_EXPECT(s.globalPragma->size() == 3))
{
BEAST_EXPECT(
s.globalPragma->at(0) == "PRAGMA journal_mode=off;");
BEAST_EXPECT(
s.globalPragma->at(1) == "PRAGMA synchronous=extra;");
BEAST_EXPECT(
s.globalPragma->at(2) == "PRAGMA temp_store=default;");
}
}
{
// Error: Mix safety_level and individual settings
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: "
"Configuration file may not define both \"safety_level\" and "
"\"journal_mode\"";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "low");
section.set("journal_mode", "off");
section.set("synchronous", "extra");
section.set("temp_store", "default");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Mix safety_level and one setting (gotta catch 'em all)
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Configuration file may "
"not define both \"safety_level\" and \"journal_mode\"";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "high");
section.set("journal_mode", "off");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Mix safety_level and one setting (gotta catch 'em all)
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Configuration file may "
"not define both \"safety_level\" and \"synchronous\"";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "low");
section.set("synchronous", "extra");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Mix safety_level and one setting (gotta catch 'em all)
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Configuration file may "
"not define both \"safety_level\" and \"temp_store\"";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "high");
section.set("temp_store", "default");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Invalid value
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Invalid safety_level "
"value: slow";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("safety_level", "slow");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Invalid value
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Invalid journal_mode "
"value: fast";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("journal_mode", "fast");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Invalid value
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Invalid synchronous "
"value: instant";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("synchronous", "instant");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
{
// Error: Invalid value
DatabaseCon::Setup::globalPragma.reset();
auto const expected =
"Failed to initialize SQLite databases: Invalid temp_store "
"value: network";
bool found = false;
auto p = test::jtx::envconfig();
{
auto& section = p->section("sqlite");
section.set("temp_store", "network");
}
try
{
Env env(
*this,
std::move(p),
std::make_unique<CheckMessageLogs>(expected, &found),
beast::severities::kWarning);
fail();
}
catch (...)
{
BEAST_EXPECT(found);
}
}
}
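// For reference, a minimal sketch of the rippled.cfg stanza these cases
// drive (values taken from the tests above, not recommendations):
//
//   [sqlite]
//   safety_level = high      # or "low"
//
// or, via the individual settings (which may not be combined with
// safety_level):
//
//   [sqlite]
//   journal_mode = wal       # values exercised here: wal, memory, off
//   synchronous = normal     # normal, off, extra
//   temp_store = file        # file, memory, default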
//--------------------------------------------------------------------------
void
testImport(
std::string const& destBackendType,
@@ -221,6 +628,8 @@ public:
{
std::int64_t const seedValue = 50;
testConfig();
testNodeStore("memory", false, seedValue);
// Persistent backend tests

View File

@@ -31,6 +31,7 @@
#include <chrono>
#include <stdexcept>
#include <test/jtx.h>
#include <test/jtx/CaptureLogs.h>
#include <test/jtx/envconfig.h>
#include <test/unit_test/SuiteJournal.h>
#include <thread>
@@ -375,60 +376,6 @@ public:
pass();
}
/**
* @brief sink for writing all log messages to a stringstream
*/
class CaptureSink : public beast::Journal::Sink
{
std::stringstream& strm_;
public:
CaptureSink(
beast::severities::Severity threshold,
std::stringstream& strm)
: beast::Journal::Sink(threshold, false), strm_(strm)
{
}
void
write(beast::severities::Severity level, std::string const& text)
override
{
strm_ << text;
}
};
/**
* @brief Log manager for CaptureSinks. This class holds the stream
* instance that is written to by the sinks. Upon destruction, all
* contents of the stream are assigned to the string specified in the
* ctor
*/
class CaptureLogs : public Logs
{
std::stringstream strm_;
std::string& result_;
public:
explicit CaptureLogs(std::string& result)
: Logs(beast::severities::kInfo), result_(result)
{
}
~CaptureLogs() override
{
result_ = strm_.str();
}
std::unique_ptr<beast::Journal::Sink>
makeSink(
std::string const& partition,
beast::severities::Severity threshold) override
{
return std::make_unique<CaptureSink>(threshold, strm_);
}
};
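// The CaptureSink/CaptureLogs pair removed above now lives in
// test/jtx/CaptureLogs.h (note the new include) so other suites can
// share it; its constructor now takes a std::string* rather than a
// reference, hence the &messages at the updated call sites below.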
void
testBadConfig()
{
@@ -444,7 +391,7 @@ public:
(*cfg).deprecatedClearSection("port_rpc");
return cfg;
}),
std::make_unique<CaptureLogs>(&messages)};
});
BEAST_EXPECT(
messages.find("Missing 'ip' in [port_rpc]") != std::string::npos);
@@ -457,7 +404,7 @@ public:
(*cfg)["port_rpc"].set("ip", getEnvLocalhostAddr()); (*cfg)["port_rpc"].set("ip", getEnvLocalhostAddr());
return cfg; return cfg;
}), }),
std::make_unique<CaptureLogs>(messages)}; std::make_unique<CaptureLogs>(&messages)};
}); });
BEAST_EXPECT( BEAST_EXPECT(
messages.find("Missing 'port' in [port_rpc]") != std::string::npos); messages.find("Missing 'port' in [port_rpc]") != std::string::npos);
@@ -471,7 +418,7 @@ public:
(*cfg)["port_rpc"].set("port", "0"); (*cfg)["port_rpc"].set("port", "0");
return cfg; return cfg;
}), }),
std::make_unique<CaptureLogs>(messages)}; std::make_unique<CaptureLogs>(&messages)};
}); });
BEAST_EXPECT( BEAST_EXPECT(
messages.find("Invalid value '0' for key 'port' in [port_rpc]") != messages.find("Invalid value '0' for key 'port' in [port_rpc]") !=
@@ -487,7 +434,7 @@ public:
(*cfg)["port_rpc"].set("protocol", ""); (*cfg)["port_rpc"].set("protocol", "");
return cfg; return cfg;
}), }),
std::make_unique<CaptureLogs>(messages)}; std::make_unique<CaptureLogs>(&messages)};
}); });
BEAST_EXPECT( BEAST_EXPECT(
messages.find("Missing 'protocol' in [port_rpc]") != messages.find("Missing 'protocol' in [port_rpc]") !=
@@ -522,7 +469,7 @@ public:
(*cfg)["port_ws"].set("admin", getEnvLocalhostAddr()); (*cfg)["port_ws"].set("admin", getEnvLocalhostAddr());
return cfg; return cfg;
}), }),
std::make_unique<CaptureLogs>(messages)}; std::make_unique<CaptureLogs>(&messages)};
}); });
BEAST_EXPECT( BEAST_EXPECT(
messages.find("Required section [server] is missing") != messages.find("Required section [server] is missing") !=
@@ -548,7 +495,7 @@ public:
(*cfg)["server"].append("port_ws"); (*cfg)["server"].append("port_ws");
return cfg; return cfg;
}), }),
std::make_unique<CaptureLogs>(messages)}; std::make_unique<CaptureLogs>(&messages)};
}); });
BEAST_EXPECT( BEAST_EXPECT(
messages.find("Missing section: [port_peer]") != std::string::npos); messages.find("Missing section: [port_peer]") != std::string::npos);