Implement Shard SQLite support

This commit is contained in:
Miguel Portilla
2019-07-19 18:29:27 -04:00
committed by Nik Bougalis
parent 008fc5155a
commit 66fad62e66
27 changed files with 1217 additions and 986 deletions

View File

@@ -2086,7 +2086,6 @@ else ()
src/ripple/app/main/Application.cpp
src/ripple/app/main/BasicApp.cpp
src/ripple/app/main/CollectorManager.cpp
src/ripple/app/main/DBInit.cpp
src/ripple/app/main/LoadManager.cpp
src/ripple/app/main/Main.cpp
src/ripple/app/main/NodeIdentity.cpp

View File

@@ -582,7 +582,7 @@ public:
void signalStop() override;
bool checkSigs() const override;
void checkSigs(bool) override;
int fdlimit () const override;
int fdRequired() const override;
//--------------------------------------------------------------------------
@@ -858,12 +858,12 @@ public:
// transaction database
mTxnDB = std::make_unique <DatabaseCon>(
setup,
TxnDBName,
TxnDBInit,
TxnDBCount);
TxDBName,
TxDBPragma,
TxDBInit);
mTxnDB->getSession() <<
boost::str(boost::format("PRAGMA cache_size=-%d;") %
(config_->getSize(siTxnDBCache) * kilobytes(1)));
kilobytes(config_->getSize(siTxnDBCache)));
mTxnDB->setupCheckpointing(m_jobQueue.get(), logs());
if (!setup.standAlone ||
@@ -900,20 +900,20 @@ public:
// ledger database
mLedgerDB = std::make_unique <DatabaseCon>(
setup,
LedgerDBName,
LedgerDBInit,
LedgerDBCount);
LgrDBName,
LgrDBPragma,
LgrDBInit);
mLedgerDB->getSession() <<
boost::str(boost::format("PRAGMA cache_size=-%d;") %
(config_->getSize(siLgrDBCache) * kilobytes(1)));
kilobytes(config_->getSize(siLgrDBCache)));
mLedgerDB->setupCheckpointing(m_jobQueue.get(), logs());
// wallet database
mWalletDB = std::make_unique <DatabaseCon>(
setup,
WalletDBName,
WalletDBInit,
WalletDBCount);
std::array<char const*, 0>(),
WalletDBInit);
}
catch (std::exception const& e)
{
@@ -963,11 +963,8 @@ public:
family().treecache().setTargetAge(
seconds{config_->getSize(siTreeCacheAge)});
if (shardStore_)
if (sFamily_)
{
shardStore_->tune(
config_->getSize(siNodeCacheSize),
seconds{config_->getSize(siNodeCacheAge)});
sFamily_->treecache().setTargetSize(
config_->getSize(siTreeCacheSize));
sFamily_->treecache().setTargetAge(
@@ -1174,9 +1171,10 @@ public:
}
DatabaseCon::Setup dbSetup = setup_DatabaseCon(*config_);
boost::filesystem::path dbPath = dbSetup.dataDir / TxnDBName;
boost::filesystem::path dbPath = dbSetup.dataDir / TxDBName;
boost::system::error_code ec;
boost::optional<std::uint64_t> dbSize = boost::filesystem::file_size(dbPath, ec);
boost::optional<std::uint64_t> dbSize =
boost::filesystem::file_size(dbPath, ec);
if (ec)
{
JLOG(m_journal.error())
@@ -1632,7 +1630,7 @@ void ApplicationImp::checkSigs(bool check)
checkSigs_ = check;
}
int ApplicationImp::fdlimit() const
int ApplicationImp::fdRequired() const
{
// Standard handles, config file, misc I/O etc:
int needed = 128;
@@ -1642,10 +1640,10 @@ int ApplicationImp::fdlimit() const
// the number of fds needed by the backend (internally
// doubled if online delete is enabled).
needed += std::max(5, m_shaMapStore->fdlimit());
needed += std::max(5, m_shaMapStore->fdRequired());
if (shardStore_)
needed += shardStore_->fdlimit();
needed += shardStore_->fdRequired();
// One fd per incoming connection a port can accept, or
// if no limit is set, assume it'll handle 256 clients.

View File

@@ -183,8 +183,8 @@ public:
virtual beast::Journal journal (std::string const& name) = 0;
/* Returns the number of file descriptors the application wants */
virtual int fdlimit () const = 0;
/* Returns the number of file descriptors the application needs */
virtual int fdRequired() const = 0;
/** Retrieve the "wallet database" */
virtual DatabaseCon& getWalletDB () = 0;

View File

@@ -1,149 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/main/DBInit.h>
#include <type_traits>
namespace ripple {
// Transaction database holds transactions and public keys
const char* TxnDBName = "transaction.db";
// Statements executed once at startup: connection pragmas followed by
// schema creation (tables and indexes) wrapped in a transaction.
const char* TxnDBInit[] =
{
"PRAGMA page_size=4096;",
"PRAGMA synchronous=NORMAL;",
"PRAGMA journal_mode=WAL;",
"PRAGMA journal_size_limit=1582080;",
"PRAGMA max_page_count=2147483646;",
// Memory-map the database only where the address space allows it
// (64-bit builds) and mapping has not been explicitly disabled.
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
"PRAGMA mmap_size=17179869184;",
#endif
"BEGIN TRANSACTION;",
"CREATE TABLE IF NOT EXISTS Transactions ( \
TransID CHARACTER(64) PRIMARY KEY, \
TransType CHARACTER(24), \
FromAcct CHARACTER(35), \
FromSeq BIGINT UNSIGNED, \
LedgerSeq BIGINT UNSIGNED, \
Status CHARACTER(1), \
RawTxn BLOB, \
TxnMeta BLOB \
);",
"CREATE INDEX IF NOT EXISTS TxLgrIndex ON \
Transactions(LedgerSeq);",
"CREATE TABLE IF NOT EXISTS AccountTransactions ( \
TransID CHARACTER(64), \
Account CHARACTER(64), \
LedgerSeq BIGINT UNSIGNED, \
TxnSeq INTEGER \
);",
"CREATE INDEX IF NOT EXISTS AcctTxIDIndex ON \
AccountTransactions(TransID);",
"CREATE INDEX IF NOT EXISTS AcctTxIndex ON \
AccountTransactions(Account, LedgerSeq, TxnSeq, TransID);",
"CREATE INDEX IF NOT EXISTS AcctLgrIndex ON \
AccountTransactions(LedgerSeq, Account, TransID);",
"END TRANSACTION;"
};
// Number of statements in TxnDBInit, computed at compile time
int TxnDBCount = std::extent<decltype(TxnDBInit)>::value;
// Ledger database holds ledgers and ledger confirmations
const char* LedgerDBName = "ledger.db";
const char* LedgerDBInit[] =
{
"PRAGMA synchronous=NORMAL;",
"PRAGMA journal_mode=WAL;",
"PRAGMA journal_size_limit=1582080;",
"BEGIN TRANSACTION;",
"CREATE TABLE IF NOT EXISTS Ledgers ( \
LedgerHash CHARACTER(64) PRIMARY KEY, \
LedgerSeq BIGINT UNSIGNED, \
PrevHash CHARACTER(64), \
TotalCoins BIGINT UNSIGNED, \
ClosingTime BIGINT UNSIGNED, \
PrevClosingTime BIGINT UNSIGNED, \
CloseTimeRes BIGINT UNSIGNED, \
CloseFlags BIGINT UNSIGNED, \
AccountSetHash CHARACTER(64), \
TransSetHash CHARACTER(64) \
);",
"CREATE INDEX IF NOT EXISTS SeqLedger ON Ledgers(LedgerSeq);",
// Old table and indexes no longer needed
"DROP TABLE IF EXISTS Validations;",
"END TRANSACTION;"
};
// Number of statements in LedgerDBInit, computed at compile time
int LedgerDBCount = std::extent<decltype(LedgerDBInit)>::value;
// Wallet database holds the node identity, peer reservations,
// and validator/publisher manifests
const char* WalletDBName = "wallet.db";
const char* WalletDBInit[] =
{
"BEGIN TRANSACTION;",
// A node's identity must be persisted, including
// for clustering purposes. This table holds one
// entry: the server's unique identity, but the
// value can be overridden by specifying a node
// identity in the config file using a [node_seed]
// entry.
"CREATE TABLE IF NOT EXISTS NodeIdentity ( \
PublicKey CHARACTER(53), \
PrivateKey CHARACTER(52) \
);",
// Peer reservations.
"CREATE TABLE IF NOT EXISTS PeerReservations ( \
PublicKey CHARACTER(53) UNIQUE NOT NULL, \
Description CHARACTER(64) NOT NULL \
);",
// Validator Manifests
"CREATE TABLE IF NOT EXISTS ValidatorManifests ( \
RawData BLOB NOT NULL \
);",
"CREATE TABLE IF NOT EXISTS PublisherManifests ( \
RawData BLOB NOT NULL \
);",
// Old tables that were present in wallet.db and we
// no longer need or use.
"DROP INDEX IF EXISTS SeedNodeNext;",
"DROP INDEX IF EXISTS SeedDomainNext;",
"DROP TABLE IF EXISTS Features;",
"DROP TABLE IF EXISTS TrustedNodes;",
"DROP TABLE IF EXISTS ValidatorReferrals;",
"DROP TABLE IF EXISTS IpReferrals;",
"DROP TABLE IF EXISTS SeedNodes;",
"DROP TABLE IF EXISTS SeedDomains;",
"DROP TABLE IF EXISTS Misc;",
"END TRANSACTION;"
};
// Number of statements in WalletDBInit, computed at compile time
int WalletDBCount = std::extent<decltype(WalletDBInit)>::value;
} // ripple

View File

@@ -20,20 +20,145 @@
#ifndef RIPPLE_APP_DATA_DBINIT_H_INCLUDED
#define RIPPLE_APP_DATA_DBINIT_H_INCLUDED
#include <array>
namespace ripple {
// VFALCO TODO Tidy these up into a class with functions and return types.
extern const char* TxnDBName;
extern const char* TxnDBInit[];
extern int TxnDBCount;
////////////////////////////////////////////////////////////////////////////////
extern const char* LedgerDBName;
extern const char* LedgerDBInit[];
extern int LedgerDBCount;
// Ledger database holds ledgers and ledger confirmations
static constexpr auto LgrDBName {"ledger.db"};
extern const char* WalletDBName;
extern const char* WalletDBInit[];
extern int WalletDBCount;
static constexpr
std::array<char const*, 3> LgrDBPragma {{
"PRAGMA synchronous=NORMAL;",
"PRAGMA journal_mode=WAL;",
"PRAGMA journal_size_limit=1582080;"
}};
static constexpr
std::array<char const*, 5> LgrDBInit {{
"BEGIN TRANSACTION;",
"CREATE TABLE IF NOT EXISTS Ledgers ( \
LedgerHash CHARACTER(64) PRIMARY KEY, \
LedgerSeq BIGINT UNSIGNED, \
PrevHash CHARACTER(64), \
TotalCoins BIGINT UNSIGNED, \
ClosingTime BIGINT UNSIGNED, \
PrevClosingTime BIGINT UNSIGNED, \
CloseTimeRes BIGINT UNSIGNED, \
CloseFlags BIGINT UNSIGNED, \
AccountSetHash CHARACTER(64), \
TransSetHash CHARACTER(64) \
);",
"CREATE INDEX IF NOT EXISTS SeqLedger ON Ledgers(LedgerSeq);",
// Old table and indexes no longer needed
"DROP TABLE IF EXISTS Validations;",
"END TRANSACTION;"
}};
////////////////////////////////////////////////////////////////////////////////
// Transaction database holds transactions and public keys
static constexpr auto TxDBName {"transaction.db"};
static constexpr
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
std::array<char const*, 6> TxDBPragma {{
#else
std::array<char const*, 5> TxDBPragma {{
#endif
"PRAGMA page_size=4096;",
"PRAGMA synchronous=NORMAL;",
"PRAGMA journal_mode=WAL;",
"PRAGMA journal_size_limit=1582080;",
"PRAGMA max_page_count=2147483646;",
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
"PRAGMA mmap_size=17179869184;"
#endif
}};
static constexpr
std::array<char const*, 8> TxDBInit {{
"BEGIN TRANSACTION;",
"CREATE TABLE IF NOT EXISTS Transactions ( \
TransID CHARACTER(64) PRIMARY KEY, \
TransType CHARACTER(24), \
FromAcct CHARACTER(35), \
FromSeq BIGINT UNSIGNED, \
LedgerSeq BIGINT UNSIGNED, \
Status CHARACTER(1), \
RawTxn BLOB, \
TxnMeta BLOB \
);",
"CREATE INDEX IF NOT EXISTS TxLgrIndex ON \
Transactions(LedgerSeq);",
"CREATE TABLE IF NOT EXISTS AccountTransactions ( \
TransID CHARACTER(64), \
Account CHARACTER(64), \
LedgerSeq BIGINT UNSIGNED, \
TxnSeq INTEGER \
);",
"CREATE INDEX IF NOT EXISTS AcctTxIDIndex ON \
AccountTransactions(TransID);",
"CREATE INDEX IF NOT EXISTS AcctTxIndex ON \
AccountTransactions(Account, LedgerSeq, TxnSeq, TransID);",
"CREATE INDEX IF NOT EXISTS AcctLgrIndex ON \
AccountTransactions(LedgerSeq, Account, TransID);",
"END TRANSACTION;"
}};
////////////////////////////////////////////////////////////////////////////////
// Pragma for Ledger and Transaction databases with complete shards
static constexpr
std::array<char const*, 2> CompleteShardDBPragma {{
"PRAGMA synchronous=OFF;",
"PRAGMA journal_mode=OFF;"
}};
////////////////////////////////////////////////////////////////////////////////
static constexpr auto WalletDBName {"wallet.db"};
static constexpr
std::array<char const*, 6> WalletDBInit {{
"BEGIN TRANSACTION;",
// A node's identity must be persisted, including
// for clustering purposes. This table holds one
// entry: the server's unique identity, but the
// value can be overridden by specifying a node
// identity in the config file using a [node_seed]
// entry.
"CREATE TABLE IF NOT EXISTS NodeIdentity ( \
PublicKey CHARACTER(53), \
PrivateKey CHARACTER(52) \
);",
// Peer reservations
"CREATE TABLE IF NOT EXISTS PeerReservations ( \
PublicKey CHARACTER(53) UNIQUE NOT NULL, \
Description CHARACTER(64) NOT NULL \
);",
// Validator Manifests
"CREATE TABLE IF NOT EXISTS ValidatorManifests ( \
RawData BLOB NOT NULL \
);",
"CREATE TABLE IF NOT EXISTS PublisherManifests ( \
RawData BLOB NOT NULL \
);",
"END TRANSACTION;"
}};
} // ripple

View File

@@ -489,9 +489,12 @@ int run (int argc, char** argv)
std::cerr << "vacuum not applicable in standalone mode.\n";
return -1;
}
boost::filesystem::path dbPath = dbSetup.dataDir / TxnDBName;
auto txnDB = std::make_unique<DatabaseCon> (dbSetup, TxnDBName,
TxnDBInit, TxnDBCount);
boost::filesystem::path dbPath = dbSetup.dataDir / TxDBName;
auto txnDB = std::make_unique<DatabaseCon>(
dbSetup,
TxDBName,
TxDBPragma,
TxDBInit);
if (txnDB.get() == nullptr)
{
std::cerr << "Cannot create connection to " << dbPath.string() <<
@@ -711,7 +714,7 @@ int run (int argc, char** argv)
// With our configuration parsed, ensure we have
// enough file descriptors available:
if (!adjustDescriptorLimit(
app->fdlimit(),
app->fdRequired(),
app->logs().journal("Application")))
{
StopSustain();

View File

@@ -62,8 +62,8 @@ public:
/** Highest ledger that may be deleted. */
virtual LedgerIndex getCanDelete() = 0;
/** The number of files that are needed. */
virtual int fdlimit() const = 0;
/** Returns the number of file descriptors that are needed. */
virtual int fdRequired() const = 0;
};
//------------------------------------------------------------------------------

View File

@@ -254,7 +254,7 @@ SHAMapStoreImp::makeNodeStore(std::string const& name, std::int32_t readThreads)
std::move(archiveBackend),
app_.config().section(ConfigSection::nodeDatabase()),
app_.logs().journal(nodeStoreName_));
fdlimit_ += dbr->fdlimit();
fdRequired_ += dbr->fdRequired();
dbRotating_ = dbr.get();
db.reset(dynamic_cast<NodeStore::Database*>(dbr.release()));
}
@@ -267,7 +267,7 @@ SHAMapStoreImp::makeNodeStore(std::string const& name, std::int32_t readThreads)
app_.getJobQueue(),
app_.config().section(ConfigSection::nodeDatabase()),
app_.logs().journal(nodeStoreName_));
fdlimit_ += db->fdlimit();
fdRequired_ += db->fdRequired();
}
return db;
}
@@ -298,9 +298,9 @@ SHAMapStoreImp::rendezvous() const
}
int
SHAMapStoreImp::fdlimit () const
SHAMapStoreImp::fdRequired() const
{
return fdlimit_;
return fdRequired_;
}
bool

View File

@@ -24,6 +24,7 @@
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/core/DatabaseCon.h>
#include <ripple/nodestore/DatabaseRotating.h>
#include <condition_variable>
#include <thread>
@@ -97,7 +98,7 @@ private:
std::shared_ptr<Ledger const> newLedger_;
std::atomic<bool> working_;
std::atomic <LedgerIndex> canDelete_;
int fdlimit_ = 0;
int fdRequired_ = 0;
std::uint32_t deleteInterval_ = 0;
bool advisoryDelete_ = false;
@@ -172,7 +173,7 @@ public:
void onLedgerClosed (std::shared_ptr<Ledger const> const& ledger) override;
void rendezvous() const override;
int fdlimit() const override;
int fdRequired() const override;
private:
// callback for visitNodes

View File

@@ -44,7 +44,7 @@ class Rules;
enum SizedItemName
{
siSweepInterval,
siSweepInterval = 0,
siNodeCacheSize,
siNodeCacheAge,
siTreeCacheSize,
@@ -56,14 +56,32 @@ enum SizedItemName
siLedgerFetch,
siHashNodeDBCache,
siTxnDBCache,
siLgrDBCache,
siLgrDBCache
};
struct SizedItem
{
SizedItemName item;
int sizes[5];
};
static constexpr
std::array<std::array<int, 5>, 13> sizedItems
{{
// tiny small medium large huge
{{ 10, 30, 60, 90, 120 }}, // siSweepInterval
{{ 2, 3, 5, 5, 8 }}, // siLedgerFetch
{{ 16384, 32768, 131072, 262144, 524288 }}, // siNodeCacheSize
{{ 60, 90, 120, 900, 1800 }}, // siNodeCacheAge
{{ 128000, 256000, 512000, 768000, 2048000 }}, // siTreeCacheSize
{{ 30, 60, 90, 120, 900 }}, // siTreeCacheAge
{{ 4096, 8192, 16384, 65536, 131072 }}, // siSLECacheSize
{{ 30, 60, 90, 120, 300 }}, // siSLECacheAge
{{ 32, 128, 256, 384, 768 }}, // siLedgerSize
{{ 30, 90, 180, 240, 900 }}, // siLedgerAge
{{ 4, 12, 24, 64, 128 }}, // siHashNodeDBCache
{{ 4, 12, 24, 64, 128 }}, // siTxnDBCache
{{ 4, 8, 16, 32, 128 }} // siLgrDBCache
}};
// This entire derived class is deprecated.
// For new config information use the style implied
@@ -182,11 +200,23 @@ public:
std::unordered_set<uint256, beast::uhash<>> features;
public:
Config()
: j_ {beast::Journal::getNullSink()}
{ }
Config() : j_ {beast::Journal::getNullSink()} {}
// Returns the configured value for the given sized item at the given
// node size index (a column of the sizedItems table: tiny..huge).
static
int
getSize(SizedItemName item, std::uint32_t nodeSize)
{
// Both indices must be in range; sizedItems is indexed [item][nodeSize].
assert(item < sizedItems.size() && nodeSize < sizedItems[item].size());
return sizedItems[item][nodeSize];
}
// Returns the configured value for the given sized item using this
// node's NODE_SIZE as the size index.
int
getSize(SizedItemName item) const
{
assert(item < sizedItems.size());
return getSize(item, NODE_SIZE);
}
int getSize (SizedItemName) const;
/* Be very careful to make sure these bool params
are in the right order. */
void setup (std::string const& strConf, bool bQuiet,

View File

@@ -20,13 +20,13 @@
#ifndef RIPPLE_APP_DATA_DATABASECON_H_INCLUDED
#define RIPPLE_APP_DATA_DATABASECON_H_INCLUDED
#include <ripple/app/main/DBInit.h>
#include <ripple/core/Config.h>
#include <ripple/core/SociDB.h>
#include <boost/filesystem/path.hpp>
#include <mutex>
#include <string>
namespace soci {
class session;
}
@@ -86,10 +86,43 @@ public:
boost::filesystem::path dataDir;
};
DatabaseCon (Setup const& setup,
std::string const& name,
const char* initString[],
int countInit);
// Opens the given SQLite database and applies, in order, the supplied
// PRAGMA statements followed by the schema initialization statements.
//   setup   : connection settings (data directory, standalone/startup mode)
//   DBName  : database file name, resolved against setup.dataDir
//   pragma  : per-connection PRAGMA statements, executed first
//   initSQL : schema creation / migration statements, executed afterward
// In standalone mode (except LOAD/LOAD_FILE/REPLAY startups) an empty
// path is passed to SQLite so the database is backed by temporary files.
template<std::size_t N, std::size_t M>
DatabaseCon(
Setup const& setup,
std::string const& DBName,
std::array<char const*, N> const& pragma,
std::array<char const*, M> const& initSQL)
{
// Use temporary files or regular DB files?
auto const useTempFiles =
setup.standAlone &&
setup.startUp != Config::LOAD &&
setup.startUp != Config::LOAD_FILE &&
setup.startUp != Config::REPLAY;
boost::filesystem::path pPath =
useTempFiles ? "" : (setup.dataDir / DBName);
open(session_, "sqlite", pPath.string());
try
{
// Execute each statement individually; soci prepares and runs one
// SQL statement per soci::statement.
for (auto const& p : pragma)
{
soci::statement st = session_.prepare << p;
st.execute(true);
}
for (auto const& sql : initSQL)
{
soci::statement st = session_.prepare << sql;
st.execute(true);
}
}
catch (soci::soci_error&)
{
// NOTE(review): a failure here aborts the remaining statements but is
// otherwise silently swallowed.
// TODO: We should at least log this error. It is annoying to wire
// a logger into every context, but there are other solutions.
}
}
soci::session& getSession()
{

View File

@@ -559,42 +559,6 @@ void Config::loadFromString (std::string const& fileContents)
}
}
// Returns the configured value for a sized item at this node's
// NODE_SIZE (columns: tiny..huge). Returns -1 for an unknown item.
int Config::getSize (SizedItemName item) const
{
SizedItem sizeTable[] = // tiny small medium large huge
{
{ siSweepInterval, { 10, 30, 60, 90, 120 } },
{ siLedgerFetch, { 2, 3, 5, 5, 8 } },
{ siNodeCacheSize, { 16384, 32768, 131072, 262144, 524288 } },
{ siNodeCacheAge, { 60, 90, 120, 900, 1800 } },
{ siTreeCacheSize, { 128000, 256000, 512000, 768000, 2048000 } },
{ siTreeCacheAge, { 30, 60, 90, 120, 900 } },
{ siSLECacheSize, { 4096, 8192, 16384, 65536, 131072 } },
{ siSLECacheAge, { 30, 60, 90, 120, 300 } },
{ siLedgerSize, { 32, 128, 256, 384, 768 } },
{ siLedgerAge, { 30, 90, 180, 240, 900 } },
{ siHashNodeDBCache, { 4, 12, 24, 64, 128 } },
{ siTxnDBCache, { 4, 12, 24, 64, 128 } },
{ siLgrDBCache, { 4, 8, 16, 32, 128 } },
};
// Linear scan of the small, fixed table for the requested item.
for (int i = 0; i < (sizeof (sizeTable) / sizeof (SizedItem)); ++i)
{
if (sizeTable[i].item == item)
return sizeTable[i].sizes[NODE_SIZE];
}
// Reaching here means the item is missing from the table above.
assert (false);
return -1;
}
boost::filesystem::path Config::getDebugLogFile () const
{
auto log_file = DEBUG_LOGFILE;

View File

@@ -25,38 +25,6 @@
namespace ripple {
// Opens the given SQLite database and executes the supplied
// initialization statements one at a time.
//   setup       : connection settings (data directory, standalone/startup mode)
//   strName     : database file name, resolved against setup.dataDir
//   initStrings : SQL statements run in order on the new session
//   initCount   : number of entries in initStrings
// In standalone mode (except LOAD/LOAD_FILE/REPLAY startups) an empty
// path is passed to SQLite so the database is backed by temporary files.
DatabaseCon::DatabaseCon (
Setup const& setup,
std::string const& strName,
const char* initStrings[],
int initCount)
{
auto const useTempFiles // Use temporary files or regular DB files?
= setup.standAlone &&
setup.startUp != Config::LOAD &&
setup.startUp != Config::LOAD_FILE &&
setup.startUp != Config::REPLAY;
boost::filesystem::path pPath = useTempFiles
? "" : (setup.dataDir / strName);
open (session_, "sqlite", pPath.string());
for (int i = 0; i < initCount; ++i)
{
// Each statement is attempted independently; a failure skips that
// statement but does not stop the remaining ones.
try
{
soci::statement st = session_.prepare <<
initStrings[i];
st.execute(true);
}
catch (soci::soci_error&)
{
// TODO: We should at least log this error. It is annoying to wire
// a logger into every context, but there are other solutions.
}
}
}
DatabaseCon::Setup setup_DatabaseCon (Config const& c)
{
DatabaseCon::Setup setup;

View File

@@ -112,14 +112,14 @@ public:
/** Perform consistency checks on database. */
virtual void verify() = 0;
/** Returns the number of file handles the backend expects to need. */
virtual int fdlimit() const = 0;
/** Returns the number of file descriptors the backend expects to need. */
virtual int fdRequired() const = 0;
/** Returns true if the backend uses permanent storage. */
bool
backed() const
{
return fdlimit();
return fdRequired();
}
};

View File

@@ -204,9 +204,9 @@ public:
std::uint32_t
getFetchSize() const { return fetchSz_; }
/** Return the number of files needed by our backend(s) */
/** Returns the number of file descriptors the database expects to need */
int
fdlimit() const { return fdLimit_; }
fdRequired() const { return fdRequired_; }
void
onStop() override;
@@ -222,7 +222,7 @@ public:
protected:
beast::Journal j_;
Scheduler& scheduler_;
int fdLimit_ {0};
int fdRequired_ {0};
void
stopThreads();

View File

@@ -191,7 +191,7 @@ public:
}
int
fdlimit() const override
fdRequired() const override
{
return 0;
}

View File

@@ -297,7 +297,7 @@ public:
}
int
fdlimit() const override
fdRequired() const override
{
return 3;
}

View File

@@ -98,9 +98,9 @@ public:
{
}
/** Returns the number of file handles the backend expects to need */
/** Returns the number of file descriptors the backend expects to need */
int
fdlimit() const override
fdRequired() const override
{
return 0;
}

View File

@@ -99,7 +99,7 @@ public:
BatchWriter m_batch;
std::string m_name;
std::unique_ptr <rocksdb::DB> m_db;
int fdlimit_ = 2048;
int fdRequired_ = 2048;
rocksdb::Options m_options;
RocksDBBackend (int keyBytes, Section const& keyValues,
@@ -128,7 +128,7 @@ public:
}
if (get_if_exists (keyValues, "open_files", m_options.max_open_files))
fdlimit_ = m_options.max_open_files;
fdRequired_ = m_options.max_open_files;
if (keyValues.exists ("file_size_mb"))
{
@@ -405,11 +405,11 @@ public:
{
}
/** Returns the number of file handles the backend expects to need */
/** Returns the number of file descriptors the backend expects to need */
int
fdlimit() const override
fdRequired() const override
{
return fdlimit_;
return fdRequired_;
}
};

View File

@@ -42,9 +42,9 @@ DatabaseRotatingImp::DatabaseRotatingImp(
, archiveBackend_(std::move(archiveBackend))
{
if (writableBackend_)
fdLimit_ += writableBackend_->fdlimit();
fdRequired_ += writableBackend_->fdRequired();
if (archiveBackend_)
fdLimit_ += archiveBackend_->fdlimit();
fdRequired_ += archiveBackend_->fdRequired();
setParent(parent);
}

File diff suppressed because it is too large Load Diff

View File

@@ -157,7 +157,7 @@ public:
getCacheHitRate() override;
void
tune(int size, std::chrono::seconds age) override;
tune(int size, std::chrono::seconds age) override {};
void
sweep() override;
@@ -194,11 +194,11 @@ private:
// The name associated with the backend used with the shard store
std::string backendName_;
// Maximum disk space the DB can use (in bytes)
std::uint64_t maxDiskSpace_;
// Maximum storage space the shard store can utilize (in bytes)
std::uint64_t maxFileSz_;
// Disk space used to store the shards (in bytes)
std::uint64_t usedDiskSpace_ {0};
// Storage space utilized by the shard store (in bytes)
std::uint64_t fileSz_ {0};
// Each shard stores 16384 ledgers. The earliest shard may store
// less if the earliest ledger sequence truncates its beginning.
@@ -208,16 +208,12 @@ private:
// The earliest shard index
std::uint32_t const earliestShardIndex_;
// Average disk space a shard requires (in bytes)
std::uint64_t avgShardSz_;
// Shard cache tuning
int cacheSz_ {shardCacheSz};
std::chrono::seconds cacheAge_ {shardCacheAge};
// Average storage space required by a shard (in bytes)
std::uint64_t avgShardFileSz_;
// File name used to mark shards being imported from node store
static constexpr auto importMarker_ = "import";
std::shared_ptr<NodeObject>
fetchFrom(uint256 const& hash, std::uint32_t seq) override;
@@ -233,23 +229,19 @@ private:
findShardIndexToAdd(std::uint32_t validLedgerSeq,
std::lock_guard<std::mutex>&);
// Updates stats
// Set storage and file descriptor usage stats
// Lock must be held
void
updateStats(std::lock_guard<std::mutex>&);
setFileStats(std::lock_guard<std::mutex>&);
// Update status string
// Lock must be held
void
updateStatus(std::lock_guard<std::mutex>&);
std::pair<std::shared_ptr<PCache>, std::shared_ptr<NCache>>
selectCache(std::uint32_t seq);
// Returns the tune cache size divided by the number of shards
// Lock must be held
int
calcTargetCacheSz(std::lock_guard<std::mutex>&) const
{
return std::max(shardCacheSz, cacheSz_ / std::max(
1, static_cast<int>(complete_.size() + (incomplete_ ? 1 : 0))));
}
// Returns available storage space
std::uint64_t
available() const;

View File

@@ -19,27 +19,31 @@
#include <ripple/nodestore/impl/Shard.h>
#include <ripple/app/ledger/InboundLedger.h>
#include <ripple/app/main/DBInit.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/impl/DatabaseShardImp.h>
#include <ripple/nodestore/Manager.h>
#include <boost/algorithm/string.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <fstream>
namespace ripple {
namespace NodeStore {
Shard::Shard(DatabaseShard const& db, std::uint32_t index,
int cacheSz, std::chrono::seconds cacheAge, beast::Journal& j)
: index_(index)
Shard::Shard(
Application& app,
DatabaseShard const& db,
std::uint32_t index,
beast::Journal& j)
: app_(app)
, index_(index)
, firstSeq_(db.firstLedgerSeq(index))
, lastSeq_(std::max(firstSeq_, db.lastLedgerSeq(index)))
, maxLedgers_(index == db.earliestShardIndex() ?
lastSeq_ - firstSeq_ + 1 : db.ledgersPerShard())
, pCache_(std::make_shared<PCache>(
"shard " + std::to_string(index_),
cacheSz, cacheAge, stopwatch(), j))
, nCache_(std::make_shared<NCache>(
"shard " + std::to_string(index_),
stopwatch(), cacheSz, cacheAge))
, dir_(db.getRootDir() / std::to_string(index_))
, control_(dir_ / controlFileName)
, j_(j)
@@ -49,38 +53,47 @@ Shard::Shard(DatabaseShard const& db, std::uint32_t index,
}
bool
Shard::open(Section config, Scheduler& scheduler, nudb::context& ctx)
Shard::open(Scheduler& scheduler, nudb::context& ctx)
{
assert(!backend_);
using namespace boost::filesystem;
using namespace boost::beast::detail;
std::string const type (get<std::string>(config, "type", "nudb"));
Config const& config {app_.config()};
Section section {config.section(ConfigSection::shardDatabase())};
std::string const type (get<std::string>(section, "type", "nudb"));
auto factory {Manager::instance().find(type)};
if (!factory)
{
JLOG(j_.error()) <<
"shard " << index_ <<
": failed to create shard store type " << type;
" failed to create backend type " << type;
return false;
}
config.set("path", dir_.string());
section.set("path", dir_.string());
backend_ = factory->createInstance(
NodeObject::keyBytes, config, scheduler, ctx, j_);
NodeObject::keyBytes, section, scheduler, ctx, j_);
auto const preexist {exists(dir_)};
auto fail = [&](std::string msg)
auto fail = [this, preexist](std::string const& msg)
{
pCache_.reset();
nCache_.reset();
backend_.reset();
lgrSQLiteDB_.reset();
txSQLiteDB_.reset();
storedSeqs_.clear();
lastStored_.reset();
if (!preexist)
removeAll(dir_, j_);
if (!msg.empty())
{
JLOG(j_.error()) <<
"shard " << index_ << ": " << msg;
"shard " << index_ << " " << msg;
}
if (backend_)
backend_->close();
if (!preexist)
removeAll(dir_, j_);
return false;
};
@@ -112,100 +125,84 @@ Shard::open(Section config, Scheduler& scheduler, nudb::context& ctx)
if (boost::icl::first(storedSeqs_) < firstSeq_ ||
boost::icl::last(storedSeqs_) > lastSeq_)
{
return fail("invalid control file");
return fail("has an invalid control file");
}
if (boost::icl::length(storedSeqs_) >= maxLedgers_)
{
JLOG(j_.error()) <<
JLOG(j_.warn()) <<
"shard " << index_ <<
": found control file for complete shard";
storedSeqs_.clear();
complete_ = true;
" has a control file for complete shard";
setComplete();
remove_all(control_);
}
}
}
else
complete_ = true;
setComplete();
// Calculate file foot print of backend files
for (auto const& p : recursive_directory_iterator(dir_))
if (!is_directory(p))
fileSize_ += file_size(p);
setCache();
if (!initSQLite() || !setFileStats())
return fail({});
}
catch (std::exception const& e)
{
return fail(e.what());
return fail(std::string("exception ") +
e.what() + " in function " + __func__);
}
return true;
}
bool
Shard::setStored(std::shared_ptr<Ledger const> const& l)
Shard::setStored(std::shared_ptr<Ledger const> const& ledger)
{
assert(backend_&& !complete_);
if (boost::icl::contains(storedSeqs_, l->info().seq))
if (boost::icl::contains(storedSeqs_, ledger->info().seq))
{
JLOG(j_.debug()) <<
"shard " << index_ <<
" ledger seq " << l->info().seq <<
" already stored";
" has ledger sequence " << ledger->info().seq << " already stored";
return false;
}
if (!setSQLiteStored(ledger))
return false;
// Check if the shard is complete
if (boost::icl::length(storedSeqs_) >= maxLedgers_ - 1)
{
setComplete();
if (backend_->backed())
{
if (!removeAll(control_, j_))
return false;
// Update file foot print of backend files
using namespace boost::filesystem;
std::uint64_t sz {0};
try
{
for (auto const& p : recursive_directory_iterator(dir_))
if (!is_directory(p))
sz += file_size(p);
}
catch (const filesystem_error& e)
{
JLOG(j_.error()) <<
"exception: " << e.what();
fileSize_ = std::max(fileSize_, sz);
setCache();
if (!initSQLite() || !setFileStats())
return false;
}
fileSize_ = sz;
}
complete_ = true;
storedSeqs_.clear();
JLOG(j_.debug()) <<
"shard " << index_ <<
" ledger seq " << l->info().seq <<
" stored. Shard complete";
}
else
{
storedSeqs_.insert(l->info().seq);
lastStored_ = l;
storedSeqs_.insert(ledger->info().seq);
if (backend_->backed() && !saveControl())
return false;
JLOG(j_.debug()) <<
"shard " << index_ <<
" ledger seq " << l->info().seq <<
" stored";
}
JLOG(j_.debug()) <<
"shard " << index_ <<
" stored ledger sequence " << ledger->info().seq <<
(complete_ ? " and is complete" : "");
lastStored_ = ledger;
return true;
}
boost::optional<std::uint32_t>
Shard::prepare()
{
assert(backend_);
if (storedSeqs_.empty())
return lastSeq_;
return prevMissing(storedSeqs_, 1 + lastSeq_, firstSeq_);
@@ -214,6 +211,7 @@ Shard::prepare()
bool
Shard::contains(std::uint32_t seq) const
{
assert(backend_);
if (seq < firstSeq_ || seq > lastSeq_)
return false;
if (complete_)
@@ -221,44 +219,53 @@ Shard::contains(std::uint32_t seq) const
return boost::icl::contains(storedSeqs_, seq);
}
void
Shard::sweep()
{
assert(backend_);
pCache_->sweep();
nCache_->sweep();
}
bool
Shard::validate(Application& app)
Shard::validate()
{
uint256 hash;
std::uint32_t seq;
std::shared_ptr<Ledger> l;
std::shared_ptr<Ledger> ledger;
auto fail = [this](std::string const& msg)
{
JLOG(j_.error()) << "shard " << index_ << " " << msg;
return false;
};
// Find the hash of the last ledger in this shard
{
std::tie(l, seq, hash) = loadLedgerHelper(
std::tie(ledger, seq, hash) = loadLedgerHelper(
"WHERE LedgerSeq >= " + std::to_string(lastSeq_) +
" order by LedgerSeq desc limit 1", app, false);
if (!l)
{
JLOG(j_.error()) <<
"shard " << index_ <<
" unable to validate. No lookup data";
return false;
}
" order by LedgerSeq desc limit 1", app_, false);
if (!ledger)
return fail("is unable to validate due to lacking lookup data");
if (seq != lastSeq_)
{
l->setImmutable(app.config());
ledger->setImmutable(app_.config());
boost::optional<uint256> h;
try
{
h = hashOfSeq(*l, lastSeq_, j_);
h = hashOfSeq(*ledger, lastSeq_, j_);
}
catch (std::exception const& e)
{
JLOG(j_.error()) <<
"exception: " << e.what();
return false;
return fail(std::string("exception ") +
e.what() + " in function " + __func__);
}
if (!h)
{
JLOG(j_.error()) <<
"shard " << index_ <<
" No hash for last ledger seq " << lastSeq_;
return false;
return fail("is missing hash for last ledger sequence " +
std::to_string(lastSeq_));
}
hash = *h;
seq = lastSeq_;
@@ -266,9 +273,8 @@ Shard::validate(Application& app)
}
JLOG(j_.debug()) <<
"Validating shard " << index_ <<
" ledgers " << firstSeq_ <<
"-" << lastSeq_;
"shard " << index_ <<
" has ledger sequences " << firstSeq_ << "-" << lastSeq_;
// Use a short age to keep memory consumption low
auto const savedAge {pCache_->getTargetAge()};
@@ -282,44 +288,45 @@ Shard::validate(Application& app)
auto nObj = valFetch(hash);
if (!nObj)
break;
l = std::make_shared<Ledger>(
ledger = std::make_shared<Ledger>(
InboundLedger::deserializeHeader(makeSlice(nObj->getData()),
true), app.config(), *app.shardFamily());
if (l->info().hash != hash || l->info().seq != seq)
true), app_.config(), *app_.shardFamily());
if (ledger->info().seq != seq)
{
JLOG(j_.error()) <<
"ledger seq " << seq <<
" hash " << hash <<
" cannot be a ledger";
fail("encountered invalid ledger sequence " + std::to_string(seq));
break;
}
l->stateMap().setLedgerSeq(seq);
l->txMap().setLedgerSeq(seq);
l->setImmutable(app.config());
if (!l->stateMap().fetchRoot(
SHAMapHash {l->info().accountHash}, nullptr))
if (ledger->info().hash != hash)
{
JLOG(j_.error()) <<
"ledger seq " << seq <<
" missing Account State root";
fail("encountered invalid ledger hash " + to_string(hash) +
" on sequence " + std::to_string(seq));
break;
}
if (l->info().txHash.isNonZero())
ledger->stateMap().setLedgerSeq(seq);
ledger->txMap().setLedgerSeq(seq);
ledger->setImmutable(app_.config());
if (!ledger->stateMap().fetchRoot(
SHAMapHash {ledger->info().accountHash}, nullptr))
{
if (!l->txMap().fetchRoot(
SHAMapHash {l->info().txHash}, nullptr))
fail("is missing root STATE node on sequence " +
std::to_string(seq));
break;
}
if (ledger->info().txHash.isNonZero())
{
if (!ledger->txMap().fetchRoot(
SHAMapHash {ledger->info().txHash}, nullptr))
{
JLOG(j_.error()) <<
"ledger seq " << seq <<
" missing TX root";
fail("is missing root TXN node on sequence " +
std::to_string(seq));
break;
}
}
if (!valLedger(l, next))
if (!valLedger(ledger, next))
break;
hash = l->info().parentHash;
hash = ledger->info().parentHash;
--seq;
next = l;
next = ledger;
if (seq % 128 == 0)
pCache_->sweep();
}
@@ -330,79 +337,87 @@ Shard::validate(Application& app)
if (seq >= firstSeq_)
{
JLOG(j_.error()) <<
"shard " << index_ <<
(complete_ ? " is invalid, failed" : " is incomplete, stopped") <<
" at seq " << seq <<
" hash " << hash;
return false;
return fail(std::string(" is ") +
(complete_ ? "invalid, failed" : "incomplete, stopped") +
" on hash " + to_string(hash) + " on sequence " +
std::to_string(seq));
}
JLOG(j_.debug()) <<
"shard " << index_ <<
" is complete.";
"shard " << index_ << " is valid and complete";
return true;
}
bool
Shard::valLedger(std::shared_ptr<Ledger const> const& l,
Shard::valLedger(std::shared_ptr<Ledger const> const& ledger,
std::shared_ptr<Ledger const> const& next)
{
if (l->info().hash.isZero() || l->info().accountHash.isZero())
auto fail = [this](std::string const& msg)
{
JLOG(j_.error()) <<
"invalid ledger";
JLOG(j_.error()) << "shard " << index_ << " " << msg;
return false;
};
if (ledger->info().hash.isZero())
{
return fail("encountered a zero ledger hash on sequence " +
std::to_string(ledger->info().seq));
}
if (ledger->info().accountHash.isZero())
{
return fail("encountered a zero account hash on sequence " +
std::to_string(ledger->info().seq));
}
bool error {false};
auto f = [&, this](SHAMapAbstractNode& node) {
auto f = [this, &error](SHAMapAbstractNode& node)
{
if (!valFetch(node.getNodeHash().as_uint256()))
error = true;
return !error;
};
// Validate the state map
if (l->stateMap().getHash().isNonZero())
if (ledger->stateMap().getHash().isNonZero())
{
if (!l->stateMap().isValid())
if (!ledger->stateMap().isValid())
{
JLOG(j_.error()) <<
"invalid state map";
return false;
return fail("has an invalid state map on sequence " +
std::to_string(ledger->info().seq));
}
try
{
if (next && next->info().parentHash == l->info().hash)
l->stateMap().visitDifferences(&next->stateMap(), f);
if (next && next->info().parentHash == ledger->info().hash)
ledger->stateMap().visitDifferences(&next->stateMap(), f);
else
l->stateMap().visitNodes(f);
ledger->stateMap().visitNodes(f);
}
catch (std::exception const& e)
{
JLOG(j_.error()) <<
"exception: " << e.what();
return false;
return fail(std::string("exception ") +
e.what() + " in function " + __func__);
}
if (error)
return false;
}
// Validate the tx map
if (l->info().txHash.isNonZero())
// Validate the transaction map
if (ledger->info().txHash.isNonZero())
{
if (!l->txMap().isValid())
if (!ledger->txMap().isValid())
{
JLOG(j_.error()) <<
"invalid transaction map";
return false;
return fail("has an invalid transaction map on sequence " +
std::to_string(ledger->info().seq));
}
try
{
l->txMap().visitNodes(f);
ledger->txMap().visitNodes(f);
}
catch (std::exception const& e)
{
JLOG(j_.error()) <<
"exception: " << e.what();
return false;
return fail(std::string("exception ") +
e.what() + " in function " + __func__);
}
if (error)
return false;
@@ -415,6 +430,11 @@ Shard::valFetch(uint256 const& hash)
{
assert(backend_);
std::shared_ptr<NodeObject> nObj;
auto fail = [this](std::string const& msg)
{
JLOG(j_.error()) << "shard " << index_ << " " << msg;
};
try
{
switch (backend_->fetch(hash.begin(), &nObj))
@@ -423,29 +443,324 @@ Shard::valFetch(uint256 const& hash)
break;
case notFound:
{
JLOG(j_.error()) <<
"NodeObject not found. hash " << hash;
fail("is missing node object on hash " + to_string(hash));
break;
}
case dataCorrupt:
{
JLOG(j_.error()) <<
"NodeObject is corrupt. hash " << hash;
fail("has a corrupt node object on hash " + to_string(hash));
break;
}
default:
{
JLOG(j_.error()) <<
"unknown error. hash " << hash;
fail("encountered unknown error on hash " + to_string(hash));
}
}
catch (std::exception const& e)
{
fail(std::string("exception ") +
e.what() + " in function " + __func__);
}
return nObj;
}
// Mark the shard as holding its entire ledger range.
void
Shard::setComplete()
{
    // Per-ledger bookkeeping is no longer needed once every ledger in
    // the range is stored; a complete shard answers contains() from
    // its [firstSeq_, lastSeq_] bounds alone.
    storedSeqs_.clear();
    complete_ = true;
}
// Create or retune this shard's node caches.
void
Shard::setCache()
{
    // complete shards use the smallest cache and
    // fastest expiration to reduce memory consumption.
    // The incomplete shard is set according to configuration.
    if (!pCache_)
    {
        // First-time creation: pick size/age based on whether the
        // shard is already complete. Passing a fixed default (the
        // second argument) to Config::getSize yields the minimum
        // complete-shard settings.
        auto const name {"shard " + std::to_string(index_)};
        auto const sz {complete_ ?
            Config::getSize(siNodeCacheSize, 0) :
            app_.config().getSize(siNodeCacheSize)};
        auto const age {std::chrono::seconds{complete_ ?
            Config::getSize(siNodeCacheAge, 0) :
            app_.config().getSize(siNodeCacheAge)}};
        pCache_ = std::make_shared<PCache>(name, sz, age, stopwatch(), j_);
        nCache_ = std::make_shared<NCache>(name, stopwatch(), sz, age);
    }
    else
    {
        // NOTE(review): this branch unconditionally applies the
        // complete-shard (minimum) settings — presumably it is only
        // reached when an existing incomplete shard transitions to
        // complete; confirm against callers before refactoring the
        // duplicated size/age lookups.
        auto const sz {Config::getSize(siNodeCacheSize, 0)};
        pCache_->setTargetSize(sz);
        nCache_->setTargetSize(sz);
        auto const age {std::chrono::seconds{
            Config::getSize(siNodeCacheAge, 0)}};
        pCache_->setTargetAge(age);
        nCache_->setTargetAge(age);
    }
}
bool
Shard::initSQLite()
{
Config const& config {app_.config()};
DatabaseCon::Setup setup;
setup.startUp = config.START_UP;
setup.standAlone = config.standalone();
setup.dataDir = dir_;
try
{
if (complete_)
{
using namespace boost::filesystem;
// Remove WAL files if they exist
for (auto const& d : directory_iterator(dir_))
{
if (is_regular_file(d) &&
boost::iends_with(extension(d), "-wal"))
{
// Closing the session forces a checkpoint
if (!lgrSQLiteDB_)
{
lgrSQLiteDB_ = std::make_unique <DatabaseCon>(
setup,
LgrDBName,
LgrDBPragma,
LgrDBInit);
}
lgrSQLiteDB_->getSession().close();
if (!txSQLiteDB_)
{
txSQLiteDB_ = std::make_unique <DatabaseCon>(
setup,
TxDBName,
TxDBPragma,
TxDBInit);
}
txSQLiteDB_->getSession().close();
break;
}
}
lgrSQLiteDB_ = std::make_unique <DatabaseCon>(
setup,
LgrDBName,
CompleteShardDBPragma,
LgrDBInit);
lgrSQLiteDB_->getSession() <<
boost::str(boost::format("PRAGMA cache_size=-%d;") %
kilobytes(Config::getSize(siLgrDBCache, 0)));
txSQLiteDB_ = std::make_unique <DatabaseCon>(
setup,
TxDBName,
CompleteShardDBPragma,
TxDBInit);
txSQLiteDB_->getSession() <<
boost::str(boost::format("PRAGMA cache_size=-%d;") %
kilobytes(Config::getSize(siTxnDBCache, 0)));
}
else
{
// The incomplete shard uses a Write Ahead Log for performance
lgrSQLiteDB_ = std::make_unique <DatabaseCon>(
setup,
LgrDBName,
LgrDBPragma,
LgrDBInit);
lgrSQLiteDB_->getSession() <<
boost::str(boost::format("PRAGMA cache_size=-%d;") %
kilobytes(config.getSize(siLgrDBCache)));
lgrSQLiteDB_->setupCheckpointing(&app_.getJobQueue(), app_.logs());
txSQLiteDB_ = std::make_unique <DatabaseCon>(
setup,
TxDBName,
TxDBPragma,
TxDBInit);
txSQLiteDB_->getSession() <<
boost::str(boost::format("PRAGMA cache_size=-%d;") %
kilobytes(config.getSize(siTxnDBCache)));
txSQLiteDB_->setupCheckpointing(&app_.getJobQueue(), app_.logs());
}
}
catch (std::exception const& e)
{
JLOG(j_.error()) <<
"exception: " << e.what();
"shard " << index_ <<
" exception " << e.what() <<
" in function " << __func__;
return false;
}
return nObj;
return true;
}
// Write the SQLite index rows for a ledger stored in this shard's
// backend: transaction and account-transaction rows in the
// transaction DB, then the ledger header row in the ledger DB.
// Each database is updated inside its own soci transaction.
// Returns false if the ledger's transaction map is invalid or on
// any database error; returns true on success.
bool
Shard::setSQLiteStored(std::shared_ptr<Ledger const> const& ledger)
{
    auto const seq {ledger->info().seq};
    // Only incomplete shards accept new ledgers, and this sequence
    // must not already be recorded as stored
    assert(backend_ && !complete_);
    assert(!boost::icl::contains(storedSeqs_, seq));
    try
    {
        {
            // Transaction database update, committed atomically
            auto& session {txSQLiteDB_->getSession()};
            soci::transaction tr(session);
            // Remove any stale rows for this sequence before inserting
            session <<
                "DELETE FROM Transactions WHERE LedgerSeq = :seq;"
                , soci::use(seq);
            session <<
                "DELETE FROM AccountTransactions WHERE LedgerSeq = :seq;"
                , soci::use(seq);
            if (ledger->info().txHash.isNonZero())
            {
                auto const sSeq {std::to_string(seq)};
                if (!ledger->txMap().isValid())
                {
                    JLOG(j_.error()) <<
                        "shard " << index_ <<
                        " has an invalid transaction map" <<
                        " on sequence " << sSeq;
                    return false;
                }
                for (auto const& item : ledger->txs)
                {
                    // item.first is the transaction, item.second its
                    // metadata
                    auto const txID {item.first->getTransactionID()};
                    auto const sTxID {to_string(txID)};
                    auto const txMeta {std::make_shared<TxMeta>(
                        txID, ledger->seq(), *item.second)};
                    // A transaction ID is globally unique; drop any
                    // prior account mappings for it
                    session <<
                        "DELETE FROM AccountTransactions WHERE TransID = :txID;"
                        , soci::use(sTxID);
                    auto const& accounts = txMeta->getAffectedAccounts(j_);
                    if (!accounts.empty())
                    {
                        // Build one multi-row INSERT covering every
                        // affected account. 's' is a per-row template
                        // whose embedded "%s" is later substituted
                        // with each base58 account ID.
                        auto const s(boost::str(boost::format(
                            "('%s','%s',%s,%s)")
                            % sTxID
                            % "%s"
                            % sSeq
                            % std::to_string(txMeta->getIndex())));
                        std::string sql;
                        // Rough capacity estimate: one row per account
                        sql.reserve((accounts.size() + 1) * 128);
                        sql = "INSERT INTO AccountTransactions "
                            "(TransID, Account, LedgerSeq, TxnSeq) VALUES ";
                        sql += boost::algorithm::join(
                            accounts | boost::adaptors::transformed(
                                [&](AccountID const& accountID)
                                {
                                    return boost::str(boost::format(s)
                                        % ripple::toBase58(accountID));
                                }),
                            ",");
                        sql += ';';
                        session << sql;
                        JLOG(j_.trace()) <<
                            "shard " << index_ <<
                            " account transaction: " << sql;
                    }
                    else
                    {
                        // Unusual but tolerated: log and continue
                        JLOG(j_.warn()) <<
                            "shard " << index_ <<
                            " transaction in ledger " << sSeq <<
                            " affects no accounts";
                    }
                    // Store the serialized metadata alongside the
                    // transaction row
                    Serializer s;
                    item.second->add(s);
                    session <<
                        (STTx::getMetaSQLInsertReplaceHeader() +
                        item.first->getMetaSQL(
                            seq,
                            sqlEscape(std::move(s.modData())))
                        + ';');
                }
            }
            tr.commit ();
        }
        // Ledger database update: replace the header row for this
        // sequence
        auto& session {lgrSQLiteDB_->getSession()};
        soci::transaction tr(session);
        session <<
            "DELETE FROM Ledgers WHERE LedgerSeq = :seq;"
            , soci::use(seq);
        session <<
            "INSERT OR REPLACE INTO Ledgers ("
            "LedgerHash, LedgerSeq, PrevHash, TotalCoins, ClosingTime,"
            "PrevClosingTime, CloseTimeRes, CloseFlags, AccountSetHash,"
            "TransSetHash)"
            "VALUES ("
            ":ledgerHash, :ledgerSeq, :prevHash, :totalCoins, :closingTime,"
            ":prevClosingTime, :closeTimeRes, :closeFlags, :accountSetHash,"
            ":transSetHash);",
            soci::use(to_string(ledger->info().hash)),
            soci::use(seq),
            soci::use(to_string(ledger->info().parentHash)),
            soci::use(to_string(ledger->info().drops)),
            soci::use(ledger->info().closeTime.time_since_epoch().count()),
            soci::use(ledger->info().parentCloseTime.time_since_epoch().count()),
            soci::use(ledger->info().closeTimeResolution.count()),
            soci::use(ledger->info().closeFlags),
            soci::use(to_string(ledger->info().accountHash)),
            soci::use(to_string(ledger->info().txHash));
        tr.commit();
    }
    catch (std::exception const& e)
    {
        JLOG(j_.error()) <<
            "shard " << index_ <<
            " exception " << e.what() <<
            " in function " << __func__;
        return false;
    }
    return true;
}
// Recompute the shard's storage footprint: total bytes on disk and
// the number of file descriptors the backend's files require.
// Returns false if the directory scan throws; true otherwise.
bool
Shard::setFileStats()
{
    fileSz_ = 0;
    fdRequired_ = 0;

    // A memory-only backend has no files to account for
    if (!backend_->backed())
        return true;

    try
    {
        using namespace boost::filesystem;
        for (auto const& entry : directory_iterator(dir_))
        {
            if (!is_regular_file(entry))
                continue;
            fileSz_ += file_size(entry);
            ++fdRequired_;
        }
    }
    catch (std::exception const& e)
    {
        JLOG(j_.error()) <<
            "shard " << index_ <<
            " exception " << e.what() <<
            " in function " << __func__;
        return false;
    }
    return true;
}
bool
@@ -455,10 +770,10 @@ Shard::saveControl()
if (!ofs.is_open())
{
JLOG(j_.fatal()) <<
"shard " << index_ <<
" unable to save control file";
"shard " << index_ << " is unable to save control file";
return false;
}
boost::archive::text_oarchive ar(ofs);
ar & storedSeqs_;
return true;

View File

@@ -23,6 +23,7 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/basics/BasicConfig.h>
#include <ripple/basics/RangeSet.h>
#include <ripple/core/DatabaseCon.h>
#include <ripple/nodestore/NodeObject.h>
#include <ripple/nodestore/Scheduler.h>
@@ -66,14 +67,17 @@ class DatabaseShard;
class Shard
{
public:
Shard(DatabaseShard const& db, std::uint32_t index, int cacheSz,
std::chrono::seconds cacheAge, beast::Journal& j);
Shard(
Application& app,
DatabaseShard const& db,
std::uint32_t index,
beast::Journal& j);
bool
open(Section config, Scheduler& scheduler, nudb::context& ctx);
open(Scheduler& scheduler, nudb::context& ctx);
bool
setStored(std::shared_ptr<Ledger const> const& l);
setStored(std::shared_ptr<Ledger const> const& ledger);
boost::optional<std::uint32_t>
prepare();
@@ -81,40 +85,35 @@ public:
bool
contains(std::uint32_t seq) const;
void
sweep();
bool
validate(Application& app);
validate();
std::uint32_t
index() const {return index_;}
bool
complete() const {return complete_;}
complete() const {assert(backend_); return complete_;}
std::shared_ptr<PCache>&
pCache() {return pCache_;}
pCache() {assert(backend_); return pCache_;}
std::shared_ptr<NCache>&
nCache() {return nCache_;}
nCache() {assert(backend_); return nCache_;}
std::uint64_t
fileSize() const {return fileSize_;}
std::shared_ptr<Backend> const&
getBackend() const
{
assert(backend_);
return backend_;
}
fileSize() const {assert(backend_); return fileSz_;}
std::uint32_t
fdlimit() const
{
assert(backend_);
return backend_->fdlimit();
}
fdRequired() const {assert(backend_); return fdRequired_;}
std::shared_ptr<Backend> const&
getBackend() const {assert(backend_); return backend_;}
std::shared_ptr<Ledger const>
lastStored() {return lastStored_;}
lastStored() {assert(backend_); return lastStored_;}
private:
friend class boost::serialization::access;
@@ -126,6 +125,8 @@ private:
static constexpr auto controlFileName = "control.txt";
Application& app_;
// Shard Index
std::uint32_t const index_;
@@ -152,12 +153,21 @@ private:
// Path to control file
boost::filesystem::path const control_;
// Disk space utilized by the shard
std::uint64_t fileSize_ {0};
// Storage space utilized by the shard
std::uint64_t fileSz_;
// Number of file descriptors required by the shard
std::uint32_t fdRequired_;
// NuDB key/value store for node objects
std::shared_ptr<Backend> backend_;
// Ledger SQLite database used for indexes
std::unique_ptr<DatabaseCon> lgrSQLiteDB_;
// Transaction SQLite database used for indexes
std::unique_ptr<DatabaseCon> txSQLiteDB_;
beast::Journal j_;
// True if shard has its entire ledger range stored
@@ -172,7 +182,7 @@ private:
// Validate this ledger by walking its SHAMaps
// and verifying each merkle tree
bool
valLedger(std::shared_ptr<Ledger const> const& l,
valLedger(std::shared_ptr<Ledger const> const& ledger,
std::shared_ptr<Ledger const> const& next);
// Fetches from the backend and will log
@@ -180,9 +190,25 @@ private:
std::shared_ptr<NodeObject>
valFetch(uint256 const& hash);
// Calculate the file foot print of the backend files
// Marks shard immutable, having stored all of its ledgers
void
updateFileSize();
setComplete();
// Set the backend cache
void
setCache();
// Open/Create SQLite databases
bool
initSQLite();
// Create SQLite entries for a ledger stored in this shard's backend
bool
setSQLiteStored(std::shared_ptr<Ledger const> const& ledger);
// Set storage and file descriptor usage stats
bool
setFileStats();
// Save the control file for an incomplete shard
bool

View File

@@ -34,8 +34,6 @@ enum
// Expiration time for cached nodes
std::chrono::seconds constexpr cacheTargetAge = std::chrono::minutes{5};
auto constexpr shardCacheSz = 16384;
std::chrono::seconds constexpr shardCacheAge = std::chrono::minutes{1};
}
}

View File

@@ -21,4 +21,3 @@
#include <ripple/app/main/Application.cpp>
#include <ripple/app/main/BasicApp.cpp>
#include <ripple/app/main/CollectorManager.cpp>
#include <ripple/app/main/DBInit.cpp>

View File

@@ -227,7 +227,11 @@ public:
{
DatabaseCon::Setup setup;
setup.dataDir = getDatabasePath ();
DatabaseCon dbCon(setup, dbName, WalletDBInit, WalletDBCount);
DatabaseCon dbCon(
setup,
dbName.data(),
std::array<char const*, 0>(),
WalletDBInit);
auto getPopulatedManifests =
[](ManifestCache const& cache) -> std::vector<Manifest const*>