Remove SQLite Validations table
committed by Nik Bougalis
parent e1adbd7ddd
commit c5a95f1eb5
@@ -114,7 +114,6 @@ mismatch(RCLValidatedLedger const& a, RCLValidatedLedger const& b)
RCLValidationsAdaptor::RCLValidationsAdaptor(Application& app, beast::Journal j)
    : app_(app), j_(j)
{
    staleValidations_.reserve(512);
}

NetClock::time_point
@@ -148,131 +147,6 @@ RCLValidationsAdaptor::acquire(LedgerHash const & hash)
    return RCLValidatedLedger(std::move(ledger), j_);
}

void
RCLValidationsAdaptor::onStale(RCLValidation&& v)
{
    // Store the newly stale validation; do not do significant work in this
    // function since this is a callback from Validations, which may be
    // doing other work.

    ScopedLockType sl(staleLock_);
    staleValidations_.emplace_back(std::move(v));
    if (staleWriting_)
        return;

    // addJob() may return false (Job not added) at shutdown.
    staleWriting_ = app_.getJobQueue().addJob(
        jtWRITE, "Validations::doStaleWrite", [this](Job&) {
            auto event =
                app_.getJobQueue().makeLoadEvent(jtDISK, "ValidationWrite");
            ScopedLockType sl(staleLock_);
            doStaleWrite(sl);
        });
}

void
RCLValidationsAdaptor::flush(hash_map<NodeID, RCLValidation>&& remaining)
{
    bool anyNew = false;
    {
        ScopedLockType sl(staleLock_);

        for (auto const& keyVal : remaining)
        {
            staleValidations_.emplace_back(std::move(keyVal.second));
            anyNew = true;
        }

        // If we have new validations to write and there isn't a write in
        // progress already, then write to the database synchronously.
        if (anyNew && !staleWriting_)
        {
            staleWriting_ = true;
            doStaleWrite(sl);
        }

        // In the case when a prior asynchronous doStaleWrite was scheduled,
        // this loop will block until all validations have been flushed.
        // This ensures that all validations are written upon return from
        // this function.

        while (staleWriting_)
        {
            ScopedUnlockType sul(staleLock_);
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    }
}

// NOTE: doStaleWrite() must be called with staleLock_ *locked*. The passed
// ScopedLockType& acts as a reminder to future maintainers.
void
RCLValidationsAdaptor::doStaleWrite(ScopedLockType&)
{
    static const std::string insVal(
        "INSERT INTO Validations "
        "(InitialSeq, LedgerSeq, LedgerHash,NodePubKey,SignTime,RawData) "
        "VALUES (:initialSeq, :ledgerSeq, "
        ":ledgerHash,:nodePubKey,:signTime,:rawData);");
    static const std::string findSeq(
        "SELECT LedgerSeq FROM Ledgers WHERE Ledgerhash=:ledgerHash;");

    assert(staleWriting_);

    while (!staleValidations_.empty())
    {
        std::vector<RCLValidation> currentStale;
        currentStale.reserve(512);
        staleValidations_.swap(currentStale);

        {
            ScopedUnlockType sul(staleLock_);
            {
                auto db = app_.getLedgerDB().checkoutDb();

                Serializer s(1024);
                soci::transaction tr(*db);
                for (RCLValidation const& wValidation : currentStale)
                {
                    // Only save full validations until we update the schema
                    if(!wValidation.full())
                        continue;
                    s.erase();
                    STValidation::pointer const& val = wValidation.unwrap();
                    val->add(s);

                    auto const ledgerHash = to_string(val->getLedgerHash());

                    boost::optional<std::uint64_t> ledgerSeq;
                    *db << findSeq, soci::use(ledgerHash),
                        soci::into(ledgerSeq);

                    auto const initialSeq = ledgerSeq.value_or(
                        app_.getLedgerMaster().getCurrentLedgerIndex());
                    auto const nodePubKey = toBase58(
                        TokenType::NodePublic, val->getSignerPublic());
                    auto const signTime =
                        val->getSignTime().time_since_epoch().count();

                    soci::blob rawData(*db);
                    rawData.append(
                        reinterpret_cast<const char*>(s.peekData().data()),
                        s.peekData().size());
                    assert(rawData.get_len() == s.peekData().size());

                    *db << insVal, soci::use(initialSeq), soci::use(ledgerSeq),
                        soci::use(ledgerHash), soci::use(nodePubKey),
                        soci::use(signTime), soci::use(rawData);
                }

                tr.commit();
            }
        }
    }

    staleWriting_ = false;
}

bool
handleNewValidation(Application& app,
    STValidation::ref val,
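The removed adaptor shows a common batched-writer pattern: producers append to a shared vector under a mutex, and a single writer repeatedly swaps that vector for an empty one, then does the slow write with the lock released. A minimal standalone sketch of the same idea (generic names, not rippled's API; writing to stdout stands in for the SQLite insert):

#include <chrono>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

class BatchWriter
{
    std::mutex m_;
    std::vector<std::string> pending_;
    bool writing_ = false;

    void drain()
    {
        std::unique_lock<std::mutex> lock(m_);
        while (!pending_.empty())
        {
            // Swap the shared buffer for an empty one so producers can
            // keep appending while this batch is written out.
            std::vector<std::string> batch;
            batch.reserve(512);
            pending_.swap(batch);

            lock.unlock();  // the slow write happens without the lock
            for (auto const& item : batch)
                std::cout << item << '\n';
            lock.lock();
        }
        writing_ = false;  // checked by flush()-style waiters
    }

public:
    void add(std::string item)
    {
        std::lock_guard<std::mutex> lock(m_);
        pending_.emplace_back(std::move(item));
        if (writing_)
            return;  // a drain pass is already scheduled
        writing_ = true;
        std::thread([this] { drain(); }).detach();
    }
};

int main()
{
    BatchWriter w;
    for (int i = 0; i < 5; ++i)
        w.add("validation #" + std::to_string(i));
    // Give the detached writer time to finish; a real flush() would poll
    // the writing_ flag, as the removed code above does.
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
}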
@@ -211,24 +211,6 @@ public:
    NetClock::time_point
    now() const;

    /** Handle a newly stale validation.

        @param v The newly stale validation

        @warning This should do minimal work, as it is expected to be called
                 by the generic Validations code while it may be holding an
                 internal lock
    */
    void
    onStale(RCLValidation&& v);

    /** Flush current validations to disk before shutdown.

        @param remaining The remaining validations to flush
    */
    void
    flush(hash_map<NodeID, RCLValidation>&& remaining);

    /** Attempt to acquire the ledger with given id from the network */
    boost::optional<RCLValidatedLedger>
    acquire(LedgerHash const & id);
@@ -245,17 +227,6 @@ private:

    Application& app_;
    beast::Journal j_;

    // Lock for managing staleValidations_ and writing_
    std::mutex staleLock_;
    std::vector<RCLValidation> staleValidations_;
    bool staleWriting_ = false;

    // Write the stale validations to sqlite DB, the scoped lock argument
    // is used to remind callers that the staleLock_ must be *locked* prior
    // to making the call
    void
    doStaleWrite(ScopedLockType&);
};

/// Alias for RCL-specific instantiation of generic Validations

@@ -948,9 +948,6 @@ static bool saveValidatedLedger (
    VALUES
    (:ledgerHash,:ledgerSeq,:prevHash,:totalCoins,:closingTime,:prevClosingTime,
    :closeTimeRes,:closeFlags,:accountSetHash,:transSetHash);)sql");
    static std::string updateVal(
        R"sql(UPDATE Validations SET LedgerSeq = :ledgerSeq, InitialSeq = :initialSeq
            WHERE LedgerHash = :ledgerHash;)sql");

    auto db (app.getLedgerDB ().checkoutDb ());
@@ -981,12 +978,6 @@ static bool saveValidatedLedger (
        soci::use(accountHash),
        soci::use(txHash);

    *db << updateVal,
        soci::use(seq),
        soci::use(seq),
        soci::use(hash);

    tr.commit();
}

@@ -825,24 +825,149 @@ public:
    beast::Journal journal (std::string const& name) override;

    //--------------------------------------------------------------------------
    bool initSqliteDbs ()

    bool
    initSQLiteDBs()
    {
        assert (mTxnDB.get () == nullptr);
        assert (mLedgerDB.get () == nullptr);
        assert (mWalletDB.get () == nullptr);

        DatabaseCon::Setup setup = setup_DatabaseCon (*config_);
        mTxnDB = std::make_unique <DatabaseCon> (setup, TxnDBName,
            TxnDBInit, TxnDBCount);
        mLedgerDB = std::make_unique <DatabaseCon> (setup, "ledger.db",
            LedgerDBInit, LedgerDBCount);
        mWalletDB = std::make_unique <DatabaseCon> (setup, "wallet.db",
            WalletDBInit, WalletDBCount);
        try
        {
            auto const setup = setup_DatabaseCon(*config_);

            return
                mTxnDB.get () != nullptr &&
                mLedgerDB.get () != nullptr &&
                mWalletDB.get () != nullptr;
            // transaction database
            mTxnDB = std::make_unique <DatabaseCon>(
                setup,
                TxnDBName,
                TxnDBInit,
                TxnDBCount);
            mTxnDB->getSession() <<
                boost::str(boost::format("PRAGMA cache_size=-%d;") %
                    (config_->getSize(siTxnDBCache) * kilobytes(1)));
            mTxnDB->setupCheckpointing(m_jobQueue.get(), logs());

            if (!setup.standAlone ||
                setup.startUp == Config::LOAD ||
                setup.startUp == Config::LOAD_FILE ||
                setup.startUp == Config::REPLAY)
            {
                // perform any needed table updates
                updateTxnDB();

                // Check if AccountTransactions has primary key
                std::string cid, name, type;
                std::size_t notnull, dflt_value, pk;
                soci::indicator ind;
                soci::statement st = (mTxnDB->getSession().prepare <<
                    ("PRAGMA table_info(AccountTransactions);"),
                    soci::into(cid),
                    soci::into(name),
                    soci::into(type),
                    soci::into(notnull),
                    soci::into(dflt_value, ind),
                    soci::into(pk));

                st.execute();
                while (st.fetch())
                {
                    if (pk == 1)
                    {
                        JLOG(m_journal.fatal()) <<
                            "AccountTransactions database "
                            "should not have a primary key";
                        return false;
                    }
                }
            }

            // ledger database
            mLedgerDB = std::make_unique <DatabaseCon>(
                setup,
                LedgerDBName,
                LedgerDBInit,
                LedgerDBCount);
            mLedgerDB->getSession() <<
                boost::str(boost::format("PRAGMA cache_size=-%d;") %
                    (config_->getSize(siLgrDBCache) * kilobytes(1)));
            mLedgerDB->setupCheckpointing(m_jobQueue.get(), logs());

            // wallet database
            mWalletDB = std::make_unique <DatabaseCon>(
                setup,
                WalletDBName,
                WalletDBInit,
                WalletDBCount);
        }
        catch (std::exception const& e)
        {
            JLOG(m_journal.fatal()) <<
                "Failed to initialize SQLite databases: " << e.what();
            return false;
        }

        return true;
    }

    bool
    initNodeStoreDBs()
    {
        if (config_->section(ConfigSection::nodeDatabase()).empty())
        {
            JLOG(m_journal.fatal()) <<
                "The [node_db] configuration setting " <<
                "has been updated and must be set";
            return false;
        }

        if (config_->doImport)
        {
            auto j = logs_->journal("NodeObject");
            NodeStore::DummyScheduler scheduler;
            std::unique_ptr <NodeStore::Database> source =
                NodeStore::Manager::instance().make_Database(
                    "NodeStore.import",
                    scheduler,
                    0,
                    *m_jobQueue,
                    config_->section(ConfigSection::importNodeDatabase()),
                    j);

            JLOG(j.warn()) <<
                "Node import from '" << source->getName() << "' to '" <<
                getNodeStore().getName() << "'.";

            getNodeStore().import(*source);
        }

        // tune caches
        using namespace std::chrono;
        m_nodeStore->tune(
            config_->getSize(siNodeCacheSize),
            seconds{config_->getSize(siNodeCacheAge)});

        m_ledgerMaster->tune(
            config_->getSize(siLedgerSize),
            seconds{config_->getSize(siLedgerAge)});

        family().treecache().setTargetSize(
            config_->getSize (siTreeCacheSize));
        family().treecache().setTargetAge(
            seconds{config_->getSize(siTreeCacheAge)});

        if (shardStore_)
        {
            shardStore_->tune(
                config_->getSize(siNodeCacheSize),
                seconds{config_->getSize(siNodeCacheAge)});
            sFamily_->treecache().setTargetSize(
                config_->getSize(siTreeCacheSize));
            sFamily_->treecache().setTargetAge(
                seconds{config_->getSize(siTreeCacheAge)});
        }

        return true;
    }

    void signalled(const boost::system::error_code& ec, int signal_number)
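A note on the PRAGMA above: SQLite interprets a negative cache_size argument as a budget in KiB rather than a page count, which is why the configured size is multiplied by kilobytes(1) and emitted with a leading minus sign. A tiny illustration (hypothetical helper; assumes the configured size is in MiB and kilobytes(1) == 1024):

#include <string>

// makeCacheSizePragma(2) -> "PRAGMA cache_size=-2048;"
// i.e. "cap the page cache at 2048 KiB" (negative means KiB to SQLite).
std::string makeCacheSizePragma(int sizeInMiB)
{
    return "PRAGMA cache_size=-" + std::to_string(sizeInMiB * 1024) + ";";
}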
@@ -936,9 +1061,7 @@ public:
    using namespace std::chrono_literals;
    waitHandlerCounter_.join("Application", 1s, m_journal);

    JLOG(m_journal.debug()) << "Flushing validations";
    mValidations.flush ();
    JLOG(m_journal.debug()) << "Validations flushed";

    validatorSites_->stop ();

@@ -1128,9 +1251,7 @@ private:
    // and new validations must be greater than this.
    std::atomic<LedgerIndex> maxDisallowedLedger_ {0};

    void addTxnSeqField();
    void addValidationSeqFields();
    bool updateTables ();
    void updateTxnDB ();
    bool nodeToShards ();
    bool validateShards ();
    void startGenesisLedger ();
@@ -1191,29 +1312,12 @@ bool ApplicationImp::setup()
    if (!config_->standalone())
        timeKeeper_->run(config_->SNTP_SERVERS);

    if (!initSqliteDbs ())
    {
        JLOG(m_journal.fatal()) << "Cannot create database connections!";
    if (!initSQLiteDBs() || !initNodeStoreDBs())
        return false;
    }

    if (validatorKeys_.publicKey.size())
        setMaxDisallowedLedger();

    getLedgerDB ().getSession ()
        << boost::str (boost::format ("PRAGMA cache_size=-%d;") %
            (config_->getSize (siLgrDBCache) * kilobytes(1)));

    getTxnDB ().getSession ()
        << boost::str (boost::format ("PRAGMA cache_size=-%d;") %
            (config_->getSize (siTxnDBCache) * kilobytes(1)));

    mTxnDB->setupCheckpointing (m_jobQueue.get(), logs());
    mLedgerDB->setupCheckpointing (m_jobQueue.get(), logs());

    if (!updateTables ())
        return false;

    // Configure the amendments the server supports
    {
        auto const& sa = detail::supportedAmendments();
@@ -1323,23 +1427,6 @@ bool ApplicationImp::setup()
        return false;
    }

    using namespace std::chrono;
    m_nodeStore->tune(config_->getSize(siNodeCacheSize),
        seconds{config_->getSize(siNodeCacheAge)});
    m_ledgerMaster->tune(config_->getSize(siLedgerSize),
        seconds{config_->getSize(siLedgerAge)});
    family().treecache().setTargetSize(config_->getSize (siTreeCacheSize));
    family().treecache().setTargetAge(
        seconds{config_->getSize(siTreeCacheAge)});
    if (shardStore_)
    {
        shardStore_->tune(config_->getSize(siNodeCacheSize),
            seconds{config_->getSize(siNodeCacheAge)});
        sFamily_->treecache().setTargetSize(config_->getSize(siTreeCacheSize));
        sFamily_->treecache().setTargetAge(
            seconds{config_->getSize(siTreeCacheAge)});
    }

    //----------------------------------------------------------------------
    //
    // Server
@@ -1981,49 +2068,31 @@ ApplicationImp::journal (std::string const& name)
    return logs_->journal (name);
}

//VFALCO TODO clean this up since it is just a file holding a single member function definition

static
std::vector<std::string>
getSchema (DatabaseCon& dbc, std::string const& dbName)
void
ApplicationImp::updateTxnDB()
{
    std::vector<std::string> schema;
    schema.reserve(32);

    std::string sql = "SELECT sql FROM sqlite_master WHERE tbl_name='";
    sql += dbName;
    sql += "';";

    std::string r;
    soci::statement st = (dbc.getSession ().prepare << sql,
        soci::into(r));
    st.execute ();
    while (st.fetch ())
    auto schemaHas = [&](std::string const& column)
    {
        schema.emplace_back (r);
    }
        std::string cid, name;
        soci::statement st = (mTxnDB->getSession().prepare <<
            ("PRAGMA table_info(AccountTransactions);"),
            soci::into(cid),
            soci::into(name));

    return schema;
}
        st.execute();
        while (st.fetch())
        {
            if (name == column)
                return true;
        }

static bool schemaHas (
    DatabaseCon& dbc, std::string const& dbName, int line,
    std::string const& content, beast::Journal j)
{
    std::vector<std::string> schema = getSchema (dbc, dbName);
        return false;
    };

    if (static_cast<int> (schema.size ()) <= line)
    {
        JLOG (j.fatal()) << "Schema for " << dbName << " has too few lines";
        Throw<std::runtime_error> ("bad schema");
    }
    assert(schemaHas("TransID"));
    assert(!schemaHas("foobar"));

    return schema[line].find (content) != std::string::npos;
}

void ApplicationImp::addTxnSeqField ()
{
    if (schemaHas (getTxnDB (), "AccountTransactions", 0, "TxnSeq", m_journal))
    if (schemaHas("TxnSeq"))
        return;

    JLOG (m_journal.warn()) << "Transaction sequence field is missing";
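The old getSchema/schemaHas pair matched substrings of the CREATE statement stored in sqlite_master; the new lambda instead walks PRAGMA table_info, whose result rows are (cid, name, type, notnull, dflt_value, pk), matching the soci::into bindings above. A hedged sketch of the same check as a free function (soci usage mirroring this diff; only the first two result columns are bound):

#include <soci/soci.h>
#include <string>

// Returns true if `table` has a column named `column`, by scanning
// SQLite's PRAGMA table_info output row by row.
bool columnExists(soci::session& s, std::string const& table, std::string const& column)
{
    std::string cid, name;
    soci::statement st =
        (s.prepare << "PRAGMA table_info(" + table + ");",
         soci::into(cid),
         soci::into(name));
    st.execute();
    while (st.fetch())
    {
        if (name == column)
            return true;
    }
    return false;
}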
@@ -2104,79 +2173,6 @@ void ApplicationImp::addTxnSeqField ()
    tr.commit ();
}

void ApplicationImp::addValidationSeqFields ()
{
    if (schemaHas(getLedgerDB(), "Validations", 0, "LedgerSeq", m_journal))
    {
        assert(schemaHas(getLedgerDB(), "Validations", 0, "InitialSeq", m_journal));
        return;
    }

    JLOG(m_journal.warn()) << "Validation sequence fields are missing";
    assert(!schemaHas(getLedgerDB(), "Validations", 0, "InitialSeq", m_journal));

    auto& session = getLedgerDB().getSession();

    soci::transaction tr(session);

    JLOG(m_journal.info()) << "Altering table";
    session << "ALTER TABLE Validations "
        "ADD COLUMN LedgerSeq BIGINT UNSIGNED;";
    session << "ALTER TABLE Validations "
        "ADD COLUMN InitialSeq BIGINT UNSIGNED;";

    // Create the indexes, too, so we don't have to
    // wait for the next startup, which may be a while.
    // These should be identical to those in LedgerDBInit
    JLOG(m_journal.info()) << "Building new indexes";
    session << "CREATE INDEX IF NOT EXISTS "
        "ValidationsBySeq ON Validations(LedgerSeq);";
    session << "CREATE INDEX IF NOT EXISTS ValidationsByInitialSeq "
        "ON Validations(InitialSeq, LedgerSeq);";

    tr.commit();
}

bool ApplicationImp::updateTables ()
{
    if (config_->section (ConfigSection::nodeDatabase ()).empty ())
    {
        JLOG (m_journal.fatal()) << "The [node_db] configuration setting has been updated and must be set";
        return false;
    }

    // perform any needed table updates
    assert (schemaHas (getTxnDB (), "AccountTransactions", 0, "TransID", m_journal));
    assert (!schemaHas (getTxnDB (), "AccountTransactions", 0, "foobar", m_journal));
    addTxnSeqField ();

    if (schemaHas (getTxnDB (), "AccountTransactions", 0, "PRIMARY", m_journal))
    {
        JLOG (m_journal.fatal()) << "AccountTransactions database should not have a primary key";
        return false;
    }

    addValidationSeqFields ();

    if (config_->doImport)
    {
        auto j = logs_->journal("NodeObject");
        NodeStore::DummyScheduler scheduler;
        std::unique_ptr <NodeStore::Database> source =
            NodeStore::Manager::instance().make_Database ("NodeStore.import",
                scheduler, 0, *m_jobQueue,
                config_->section(ConfigSection::importNodeDatabase ()), j);

        JLOG (j.warn())
            << "Node import from '" << source->getName () << "' to '"
            << getNodeStore ().getName () << "'.";

        getNodeStore().import (*source);
    }

    return true;
}

bool ApplicationImp::nodeToShards()
{
    assert(m_overlay);

@@ -66,10 +66,10 @@ const char* TxnDBInit[] =

    "END TRANSACTION;"
};

int TxnDBCount = std::extent<decltype(TxnDBInit)>::value;

// Ledger database holds ledgers and ledger confirmations
const char* LedgerDBName = "ledger.db";
const char* LedgerDBInit[] =
{
    "PRAGMA synchronous=NORMAL;",
@@ -78,7 +78,7 @@ const char* LedgerDBInit[] =

    "BEGIN TRANSACTION;",

    "CREATE TABLE IF NOT EXISTS Ledgers ( \
    "CREATE TABLE IF NOT EXISTS Ledgers ( \
        LedgerHash CHARACTER(64) PRIMARY KEY, \
        LedgerSeq BIGINT UNSIGNED, \
        PrevHash CHARACTER(64), \
@@ -92,30 +92,14 @@ const char* LedgerDBInit[] =
    );",
    "CREATE INDEX IF NOT EXISTS SeqLedger ON Ledgers(LedgerSeq);",

    // InitialSeq field is the current ledger seq when the row
    // is inserted. Only relevant during online delete
    "CREATE TABLE IF NOT EXISTS Validations ( \
        LedgerSeq BIGINT UNSIGNED, \
        InitialSeq BIGINT UNSIGNED, \
        LedgerHash CHARACTER(64), \
        NodePubKey CHARACTER(56), \
        SignTime BIGINT UNSIGNED, \
        RawData BLOB \
    );",
    "CREATE INDEX IF NOT EXISTS ValidationsByHash ON \
        Validations(LedgerHash);",
    "CREATE INDEX IF NOT EXISTS ValidationsBySeq ON \
        Validations(LedgerSeq);",
    "CREATE INDEX IF NOT EXISTS ValidationsByInitialSeq ON \
        Validations(InitialSeq, LedgerSeq);",
    "CREATE INDEX IF NOT EXISTS ValidationsByTime ON \
        Validations(SignTime);",
    // Old table and indexes no longer needed
    "DROP TABLE IF EXISTS Validations;",

    "END TRANSACTION;"
};

int LedgerDBCount = std::extent<decltype(LedgerDBInit)>::value;

const char* WalletDBName = "wallet.db";
const char* WalletDBInit[] =
{
    "BEGIN TRANSACTION;",
@@ -154,7 +138,6 @@ const char* WalletDBInit[] =

    "END TRANSACTION;"
};

int WalletDBCount = std::extent<decltype(WalletDBInit)>::value;

} // ripple
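The *DBCount variables rely on std::extent, which yields the number of elements in a C array type at compile time, so each count automatically tracks the number of SQL statements in its init array. A minimal illustration:

#include <type_traits>

const char* stmts[] = {"BEGIN TRANSACTION;", "END TRANSACTION;"};

// std::extent<T>::value is the element count of array type T's first
// dimension, so this holds at compile time.
static_assert(std::extent<decltype(stmts)>::value == 2, "two statements");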
@@ -25,12 +25,14 @@ namespace ripple {
// VFALCO TODO Tidy these up into a class with functions and return types.
extern const char* TxnDBName;
extern const char* TxnDBInit[];
extern const char* LedgerDBInit[];
extern const char* WalletDBInit[];

// VFALCO TODO Figure out what these counts are for
extern int TxnDBCount;

extern const char* LedgerDBName;
extern const char* LedgerDBInit[];
extern int LedgerDBCount;

extern const char* WalletDBName;
extern const char* WalletDBInit[];
extern int WalletDBCount;

} // ripple
@@ -657,118 +657,6 @@ SHAMapStoreImp::clearPrior (LedgerIndex lastRotated)
    if (health())
        return;

    {
        /*
            Steps for migration:
            Assume: online_delete = 100, lastRotated = 1000,
                Last shutdown was at ledger # 1080.
                The current network validated ledger is 1090.
            Implies: Ledgers has entries from 900 to 1080.
                Validations has entries for all 1080 ledgers,
                including orphan validations that were not included
                in a validated ledger.
            1) Columns are created in Validations with default NULL values.
            2) During syncing, Ledgers and Validations for 1080 - 1090
                are received from the network. Records are created in
                Validations with InitialSeq approximately 1080 (exact value
                doesn't matter), and later validated with the matching
                LedgerSeq value.
            3) rippled participates in ledgers 1091-1100. Validations
                received are created with InitialSeq in that range, and
                appropriate LedgerSeqs. Maybe some of those ledgers are
                not accepted, so LedgerSeq stays null.
            4) At ledger 1100, this function is called with
                lastRotated = 1000. The first query tries to delete
                rows WHERE LedgerSeq < 1000. It finds none.
            5) The second round of deletions does not run.
            6) Ledgers continue to advance from 1100-1200 as described
                in step 3.
            7) At ledger 1200, this function is called again with
                lastRotated = 1100. The first query again tries to delete
                rows WHERE LedgerSeq < 1100. It finds the rows for 1080-1099.
            8) The second round of deletions runs. It gets
                WHERE v.LedgerSeq is NULL AND
                    (v.InitialSeq IS NULL OR v.InitialSeq < 1100)
                The rows that are found include (a) ALL of the Validations
                for the first 1080 ledgers. (b) Any orphan validations that
                were created in step 3.
            9) This continues. The next rotation cycle does the same as steps
                7 & 8, except that none of the original Validations (8a) exist
                anymore, and 8b gets the orphans from step 6.
        */

        static auto anyValDeleted = false;
        auto const valDeleted = clearSql(*ledgerDb_, lastRotated,
            "SELECT MIN(LedgerSeq) FROM Validations;",
            "DELETE FROM Validations WHERE LedgerSeq < %u;");
        anyValDeleted |= valDeleted;

        if (health())
            return;

        if (anyValDeleted)
        {
            /* Delete the old NULL LedgerSeqs - the Validations that
                aren't linked to a validated ledger - but only if we
                deleted rows in the matching `clearSql` call, and only
                for those created with an old InitialSeq.
            */
            using namespace std::chrono;
            auto const deleteBatch = setup_.deleteBatch;
            auto const continueLimit = (deleteBatch + 1) / 2;

            std::string const deleteQuery(
                R"sql(DELETE FROM Validations
                    WHERE LedgerHash IN
                    (
                        SELECT v.LedgerHash
                        FROM Validations v
                        WHERE v.LedgerSeq is NULL AND
                            (v.InitialSeq IS NULL OR v.InitialSeq < )sql" +
                std::to_string(lastRotated) +
                ") LIMIT " +
                std::to_string (deleteBatch) +
                ");");

            JLOG(journal_.debug()) << "start: " << deleteQuery << " of "
                << deleteBatch << " rows.";
            long long totalRowsAffected = 0;
            long long rowsAffected;
            auto st = [&]
            {
                auto db = ledgerDb_->checkoutDb();
                return soci::statement(db->prepare << deleteQuery);
            }();
            if (health())
                return;
            do
            {
                {
                    auto db = ledgerDb_->checkoutDb();
                    auto const start = high_resolution_clock::now();
                    st.execute(true);
                    rowsAffected = st.get_affected_rows();
                    totalRowsAffected += rowsAffected;
                    auto const ms = duration_cast<milliseconds>(
                        high_resolution_clock::now() - start).count();
                    JLOG(journal_.trace()) << "step: deleted " << rowsAffected
                        << " rows in " << ms << "ms.";
                }
                if (health())
                    return;
                if (rowsAffected >= continueLimit)
                    std::this_thread::sleep_for(
                        std::chrono::milliseconds(setup_.backOff));
            }
            while (rowsAffected && rowsAffected >= continueLimit);
            JLOG(journal_.debug()) << "finished: " << deleteQuery << ". Deleted "
                << totalRowsAffected << " rows.";
        }
    }

    if (health())
        return;

    clearSql (*transactionDb_, lastRotated,
        "SELECT MIN(LedgerSeq) FROM Transactions;",
        "DELETE FROM Transactions WHERE LedgerSeq < %u;");
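For concreteness, with the numbers from the migration comment above (lastRotated = 1100) and taking deleteBatch = 100 for illustration, the concatenation in the removed code renders to roughly this statement:

DELETE FROM Validations
    WHERE LedgerHash IN
    (
        SELECT v.LedgerHash
        FROM Validations v
        WHERE v.LedgerSeq is NULL AND
            (v.InitialSeq IS NULL OR v.InitialSeq < 1100) LIMIT 100
    );

The LIMIT inside the subquery caps each round at deleteBatch rows, and the surrounding do/while keeps reissuing the statement, with a back-off sleep, while at least continueLimit rows are still being deleted.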
@@ -260,14 +260,6 @@ to_string(ValStatus m)
    using Validation = Validation;
    using Ledger = Ledger;

    // Handle a newly stale validation, this should do minimal work since
    // it is called by Validations while it may be iterating Validations
    // under lock
    void onStale(Validation && );

    // Flush the remaining validations (typically done on shutdown)
    void flush(hash_map<NodeID,Validation> && remaining);

    // Return the current network time (used to determine staleness)
    NetClock::time_point now() const;

@@ -494,7 +486,6 @@ private:
        parms_, t, it->second.signTime(), it->second.seenTime()))
    {
        removeTrie(lock, it->first, it->second);
        adaptor_.onStale(std::move(it->second));
        it = current_.erase(it);
    }
    else
@@ -617,7 +608,6 @@ public:
    if (val.signTime() > oldVal.signTime())
    {
        std::pair<Seq, ID> old(oldVal.seq(), oldVal.ledgerID());
        adaptor_.onStale(std::move(oldVal));
        ins.first->second = val;
        if (val.trusted())
            updateTrie(lock, nodeID, val, old);
@@ -966,17 +956,8 @@ public:
    void
    flush()
    {
        hash_map<NodeID, Validation> flushed;
        {
            ScopedLock lock{mutex_};
            for (auto it : current_)
            {
                flushed.emplace(it.first, std::move(it.second));
            }
            current_.clear();
        }

        adaptor_.flush(std::move(flushed));
        ScopedLock lock{mutex_};
        current_.clear();
    }

    /** Return quantity of lagging proposers, and remove online proposers
@@ -125,18 +125,6 @@ class SHAMapStore_test : public beast::unit_test::suite
        return json[jss::result][jss::ledger][jss::hash].asString();
    }

    void validationCheck(jtx::Env& env, int const expected)
    {
        auto db = env.app().getLedgerDB().checkoutDb();

        int actual;
        *db << "SELECT count(*) AS rows FROM Validations;",
            soci::into(actual);

        BEAST_EXPECT(actual == expected);

    }

    void ledgerCheck(jtx::Env& env, int const rows,
        int const first)
    {
@@ -214,7 +202,6 @@ public:
    auto& store = env.app().getSHAMapStore();
    env.fund(XRP(10000), noripple("alice"));

    validationCheck(env, 0);
    ledgerCheck(env, 1, 2);
    transactionCheck(env, 0);
    accountTransactionCheck(env, 0);
@@ -260,67 +247,6 @@ public:
            getHash(ledgers[i]).length());
    }

    validationCheck(env, 0);
    ledgerCheck(env, deleteInterval + 1, 2);
    transactionCheck(env, deleteInterval);
    accountTransactionCheck(env, 2 * deleteInterval);

    {
        // Since standalone doesn't _do_ validations, manually
        // insert some into the table. Create some with the
        // hashes from our real ledgers, and some with fake
        // hashes to represent validations that never ended up
        // in a validated ledger.
        char lh[65];
        memset(lh, 'a', 64);
        lh[64] = '\0';
        std::vector<std::string> preSeqLedgerHashes({
            lh
        });
        std::vector<std::string> badLedgerHashes;
        std::vector<LedgerIndex> badLedgerSeqs;
        std::vector<std::string> ledgerHashes;
        std::vector<LedgerIndex> ledgerSeqs;
        for (auto const& lgr : ledgers)
        {
            ledgerHashes.emplace_back(getHash(lgr.second));
            ledgerSeqs.emplace_back(lgr.second[jss::result][jss::ledger_index].asUInt());
        }
        for (auto i = 0; i < 10; ++i)
        {
            ++lh[30];
            preSeqLedgerHashes.emplace_back(lh);
            ++lh[20];
            badLedgerHashes.emplace_back(lh);
            badLedgerSeqs.emplace_back(i + 1);
        }

        auto db = env.app().getLedgerDB().checkoutDb();

        // Pre-migration validation - no sequence numbers.
        *db << "INSERT INTO Validations "
            "(LedgerHash) "
            "VALUES "
            "(:ledgerHash);",
            soci::use(preSeqLedgerHashes);
        // Post-migration orphan validation - InitalSeq,
        // but no LedgerSeq
        *db << "INSERT INTO Validations "
            "(LedgerHash, InitialSeq) "
            "VALUES "
            "(:ledgerHash, :initialSeq);",
            soci::use(badLedgerHashes),
            soci::use(badLedgerSeqs);
        // Post-migration validated ledger.
        *db << "INSERT INTO Validations "
            "(LedgerHash, LedgerSeq) "
            "VALUES "
            "(:ledgerHash, :ledgerSeq);",
            soci::use(ledgerHashes),
            soci::use(ledgerSeqs);
    }

    validationCheck(env, deleteInterval + 23);
    ledgerCheck(env, deleteInterval + 1, 2);
    transactionCheck(env, deleteInterval);
    accountTransactionCheck(env, 2 * deleteInterval);
|
||||
BEAST_EXPECT(lastRotated == 11);
|
||||
|
||||
// That took care of the fake hashes
|
||||
validationCheck(env, deleteInterval + 8);
|
||||
ledgerCheck(env, deleteInterval + 1, 3);
|
||||
transactionCheck(env, deleteInterval);
|
||||
accountTransactionCheck(env, 2 * deleteInterval);
|
||||
@@ -348,8 +273,6 @@ public:
|
||||
// The last iteration of this loop should trigger a rotate
|
||||
for (auto i = lastRotated - 1; i < lastRotated + deleteInterval - 1; ++i)
|
||||
{
|
||||
validationCheck(env, deleteInterval + i + 1 - lastRotated + 8);
|
||||
|
||||
env.close();
|
||||
|
||||
ledgerTmp = env.rpc("ledger", "current");
|
||||
@@ -361,28 +284,12 @@ public:
|
||||
i == lastRotated + deleteInterval - 2);
|
||||
BEAST_EXPECT(goodLedger(env, ledgers[i], to_string(i), true) &&
|
||||
getHash(ledgers[i]).length());
|
||||
|
||||
std::vector<std::string> ledgerHashes({
|
||||
getHash(ledgers[i])
|
||||
});
|
||||
std::vector<LedgerIndex> ledgerSeqs({
|
||||
ledgers[i][jss::result][jss::ledger_index].asUInt()
|
||||
});
|
||||
auto db = env.app().getLedgerDB().checkoutDb();
|
||||
|
||||
*db << "INSERT INTO Validations "
|
||||
"(LedgerHash, LedgerSeq) "
|
||||
"VALUES "
|
||||
"(:ledgerHash, :ledgerSeq);",
|
||||
soci::use(ledgerHashes),
|
||||
soci::use(ledgerSeqs);
|
||||
}
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
BEAST_EXPECT(store.getLastRotated() == deleteInterval + lastRotated);
|
||||
|
||||
validationCheck(env, deleteInterval - 1);
|
||||
ledgerCheck(env, deleteInterval + 1, lastRotated);
|
||||
transactionCheck(env, 0);
|
||||
accountTransactionCheck(env, 0);
|
||||
@@ -421,7 +328,6 @@ public:
|
||||
|
||||
// The database will always have back to ledger 2,
|
||||
// regardless of lastRotated.
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - 2, 2);
|
||||
BEAST_EXPECT(lastRotated == store.getLastRotated());
|
||||
|
||||
@@ -435,7 +341,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - lastRotated, lastRotated);
|
||||
BEAST_EXPECT(lastRotated != store.getLastRotated());
|
||||
|
||||
@@ -452,7 +357,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, deleteInterval + 1, lastRotated);
|
||||
BEAST_EXPECT(lastRotated != store.getLastRotated());
|
||||
}
|
||||
@@ -491,7 +395,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - 2, 2);
|
||||
BEAST_EXPECT(lastRotated == store.getLastRotated());
|
||||
|
||||
@@ -504,7 +407,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - 2, 2);
|
||||
BEAST_EXPECT(store.getLastRotated() == lastRotated);
|
||||
|
||||
@@ -518,7 +420,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - lastRotated, lastRotated);
|
||||
|
||||
BEAST_EXPECT(store.getLastRotated() == ledgerSeq - 1);
|
||||
@@ -547,7 +448,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - firstBatch, firstBatch);
|
||||
|
||||
BEAST_EXPECT(store.getLastRotated() == ledgerSeq - 1);
|
||||
@@ -582,7 +482,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - lastRotated, lastRotated);
|
||||
|
||||
BEAST_EXPECT(store.getLastRotated() == ledgerSeq - 1);
|
||||
@@ -616,7 +515,6 @@ public:
|
||||
|
||||
store.rendezvous();
|
||||
|
||||
validationCheck(env, 0);
|
||||
ledgerCheck(env, ledgerSeq - lastRotated, lastRotated);
|
||||
|
||||
BEAST_EXPECT(store.getLastRotated() == ledgerSeq - 1);
|
||||
|
||||
@@ -165,18 +165,9 @@ class Validations_test : public beast::unit_test::suite
    }
};

// Saved StaleData for inspection in test
struct StaleData
{
    std::vector<Validation> stale;
    hash_map<PeerID, Validation> flushed;
};

// Generic Validations adaptor that saves stale/flushed data into
// a StaleData instance.
// Generic Validations adaptor
class Adaptor
{
    StaleData& staleData_;
    clock_type& c_;
    LedgerOracle& oracle_;
@@ -198,8 +189,8 @@ class Validations_test : public beast::unit_test::suite
    using Validation = csf::Validation;
    using Ledger = csf::Ledger;

    Adaptor(StaleData& sd, clock_type& c, LedgerOracle& o)
        : staleData_{sd}, c_{c}, oracle_{o}
    Adaptor(clock_type& c, LedgerOracle& o)
        : c_{c}, oracle_{o}
    {
    }
@@ -209,18 +200,6 @@ class Validations_test : public beast::unit_test::suite
        return toNetClock(c_);
    }

    void
    onStale(Validation&& v)
    {
        staleData_.stale.emplace_back(std::move(v));
    }

    void
    flush(hash_map<PeerID, Validation>&& remaining)
    {
        staleData_.flushed = std::move(remaining);
    }

    boost::optional<Ledger>
    acquire(Ledger::ID const& id)
    {
@@ -235,7 +214,6 @@ class Validations_test : public beast::unit_test::suite
    // accessors for simplifying test logic
    class TestHarness
    {
        StaleData staleData_;
        ValidationParms p_;
        beast::manual_clock<std::chrono::steady_clock> clock_;
        TestValidations tv_;
@@ -243,7 +221,7 @@ class Validations_test : public beast::unit_test::suite

    public:
        explicit TestHarness(LedgerOracle& o)
            : tv_(p_, clock_, staleData_, clock_, o)
            : tv_(p_, clock_, clock_, o)
        {
        }
@@ -276,18 +254,6 @@ class Validations_test : public beast::unit_test::suite
        {
            return clock_;
        }

        std::vector<Validation> const&
        stale() const
        {
            return staleData_.stale;
        }

        hash_map<PeerID, Validation> const&
        flushed() const
        {
            return staleData_.flushed;
        }
    };

    Ledger const genesisLedger{Ledger::MakeGenesis{}};
@@ -320,16 +286,10 @@ class Validations_test : public beast::unit_test::suite
    BEAST_EXPECT(ValStatus::badSeq == harness.add(v));

    harness.clock().advance(1s);
    // Replace with a new validation and ensure the old one is stale
    BEAST_EXPECT(harness.stale().empty());

    BEAST_EXPECT(
        ValStatus::current == harness.add(n.validate(ledgerAB)));

    BEAST_EXPECT(harness.stale().size() == 1);

    BEAST_EXPECT(harness.stale()[0].ledgerID() == ledgerA.id());

    // Test the node changing signing key

    // Confirm old ledger on hand, but not new ledger
@@ -476,14 +436,11 @@ class Validations_test : public beast::unit_test::suite
    BEAST_EXPECT(
        harness.vals().getPreferred(genesisLedger) ==
        std::make_pair(ledgerAB.seq(), ledgerAB.id()));
    BEAST_EXPECT(harness.stale().empty());
    harness.clock().advance(harness.parms().validationCURRENT_LOCAL);

    // trigger check for stale
    trigger(harness.vals());

    BEAST_EXPECT(harness.stale().size() == 1);
    BEAST_EXPECT(harness.stale()[0].ledgerID() == ledgerAB.id());
    BEAST_EXPECT(
        harness.vals().getNodesAfter(ledgerA, ledgerA.id()) == 0);
    BEAST_EXPECT(
@@ -780,7 +737,6 @@ class Validations_test : public beast::unit_test::suite
        BEAST_EXPECT(ValStatus::current == harness.add(val));
        expected.emplace(node.nodeID(), val);
    }
    Validation staleA = expected.find(a.nodeID())->second;

    // Send in a new validation for a, saving the new one into the expected
    // map after setting the proper prior ledger ID it replaced
@@ -788,18 +744,6 @@ class Validations_test : public beast::unit_test::suite
    auto newVal = a.validate(ledgerAB);
    BEAST_EXPECT(ValStatus::current == harness.add(newVal));
    expected.find(a.nodeID())->second = newVal;

    // Now flush
    harness.vals().flush();

    // Original a validation was stale
    BEAST_EXPECT(harness.stale().size() == 1);
    BEAST_EXPECT(harness.stale()[0] == staleA);
    BEAST_EXPECT(harness.stale()[0].nodeID() == a.nodeID());

    auto const& flushed = harness.flushed();

    BEAST_EXPECT(flushed == expected);
}

void

@@ -310,14 +310,7 @@ public:
        LedgerHash CHARACTER(64) PRIMARY KEY, \
        LedgerSeq BIGINT UNSIGNED \
    );",
    "CREATE INDEX SeqLedger ON Ledgers(LedgerSeq);",

    "CREATE TABLE Validations ( \
        LedgerHash CHARACTER(64) \
    );",
    "CREATE INDEX ValidationsByHash ON \
        Validations(LedgerHash);",
    "END TRANSACTION;"};
    "CREATE INDEX SeqLedger ON Ledgers(LedgerSeq);"};
int dbInitCount = std::extent<decltype(dbInit)>::value;
for (int i = 0; i < dbInitCount; ++i)
{
@@ -343,16 +336,10 @@ public:
    s << "INSERT INTO Ledgers (LedgerHash, LedgerSeq) VALUES "
        "(:lh, :li);",
        soci::use (ledgerHashes), soci::use (ledgerIndexes);
    s << "INSERT INTO Validations (LedgerHash) VALUES "
        "(:lh);", soci::use (ledgerHashes);

    std::vector<int> ledgersLS (numRows * 2);
    std::vector<std::string> validationsLH (numRows * 2);
    s << "SELECT LedgerSeq FROM Ledgers;", soci::into (ledgersLS);
    s << "SELECT LedgerHash FROM Validations;",
        soci::into (validationsLH);
    BEAST_EXPECT(ledgersLS.size () == numRows &&
        validationsLH.size () == numRows);
    BEAST_EXPECT(ledgersLS.size () == numRows);
}
namespace bfs = boost::filesystem;
// Remove the database

@@ -144,16 +144,6 @@ struct Peer
        return p_.now();
    }

    void
    onStale(Validation&& v)
    {
    }

    void
    flush(hash_map<PeerID, Validation>&& remaining)
    {
    }

    boost::optional<Ledger>
    acquire(Ledger::ID const & id)
    {