diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index fd8f69177c..e50b20cd6c 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -365,7 +365,6 @@ target_sources (rippled PRIVATE src/ripple/app/main/Main.cpp src/ripple/app/main/NodeIdentity.cpp src/ripple/app/main/NodeStoreScheduler.cpp - src/ripple/app/reporting/DBHelpers.cpp src/ripple/app/reporting/ReportingETL.cpp src/ripple/app/reporting/ETLSource.cpp src/ripple/app/reporting/P2pProxy.cpp @@ -398,6 +397,13 @@ target_sources (rippled PRIVATE src/ripple/app/paths/impl/DirectStep.cpp src/ripple/app/paths/impl/PaySteps.cpp src/ripple/app/paths/impl/XRPEndpointStep.cpp + src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp + src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp + src/ripple/app/rdb/impl/RelationalDBInterface.cpp + src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp + src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp + src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp + src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp src/ripple/app/tx/impl/ApplyContext.cpp src/ripple/app/tx/impl/BookTip.cpp src/ripple/app/tx/impl/CancelCheck.cpp diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt index c4589932bd..1afba8bc71 100644 --- a/Builds/levelization/results/loops.txt +++ b/Builds/levelization/results/loops.txt @@ -13,6 +13,9 @@ Loop: ripple.app ripple.nodestore Loop: ripple.app ripple.overlay ripple.overlay ~= ripple.app +Loop: ripple.app ripple.peerfinder + ripple.peerfinder ~= ripple.app + Loop: ripple.app ripple.rpc ripple.rpc > ripple.app diff --git a/src/ripple/app/consensus/RCLValidations.cpp b/src/ripple/app/consensus/RCLValidations.cpp index 482fe175df..b22b73c4e5 100644 --- a/src/ripple/app/consensus/RCLValidations.cpp +++ b/src/ripple/app/consensus/RCLValidations.cpp @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 40e2b7abe5..5ef4851ba6 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -29,14 +29,14 @@ #include #include #include -#include +#include +#include #include #include #include #include #include #include -#include #include #include #include @@ -927,196 +927,14 @@ saveValidatedLedger( return true; } - // TODO(tom): Fix this hard-coded SQL! - JLOG(j.trace()) << "saveValidatedLedger " << (current ? 
"" : "fromAcquire ") - << seq; - - if (!ledger->info().accountHash.isNonZero()) - { - JLOG(j.fatal()) << "AH is zero: " << getJson({*ledger, {}}); - assert(false); - } - - if (ledger->info().accountHash != ledger->stateMap().getHash().as_uint256()) - { - JLOG(j.fatal()) << "sAL: " << ledger->info().accountHash - << " != " << ledger->stateMap().getHash(); - JLOG(j.fatal()) << "saveAcceptedLedger: seq=" << seq - << ", current=" << current; - assert(false); - } - - assert(ledger->info().txHash == ledger->txMap().getHash().as_uint256()); - - // Save the ledger header in the hashed object store - { - Serializer s(128); - s.add32(HashPrefix::ledgerMaster); - addRaw(ledger->info(), s); - app.getNodeStore().store( - hotLEDGER, std::move(s.modData()), ledger->info().hash, seq); - } - - AcceptedLedger::pointer aLedger; - try - { - aLedger = app.getAcceptedLedgerCache().fetch(ledger->info().hash); - if (!aLedger) - { - aLedger = std::make_shared(ledger, app); - app.getAcceptedLedgerCache().canonicalize_replace_client( - ledger->info().hash, aLedger); - } - } - catch (std::exception const&) - { - JLOG(j.warn()) << "An accepted ledger was missing nodes"; - app.getLedgerMaster().failedSave(seq, ledger->info().hash); - // Clients can now trust the database for information about this - // ledger sequence. - app.pendingSaves().finishWork(seq); - return false; - } - - if (!app.config().reporting()) - { - static boost::format deleteLedger( - "DELETE FROM Ledgers WHERE LedgerSeq = %u;"); - static boost::format deleteTrans1( - "DELETE FROM Transactions WHERE LedgerSeq = %u;"); - static boost::format deleteTrans2( - "DELETE FROM AccountTransactions WHERE LedgerSeq = %u;"); - static boost::format deleteAcctTrans( - "DELETE FROM AccountTransactions WHERE TransID = '%s';"); - - { - auto db = app.getLedgerDB().checkoutDb(); - *db << boost::str(deleteLedger % seq); - } - - if (app.config().useTxTables()) - { - auto db = app.getTxnDB().checkoutDb(); - - soci::transaction tr(*db); - - *db << boost::str(deleteTrans1 % seq); - *db << boost::str(deleteTrans2 % seq); - - std::string const ledgerSeq(std::to_string(seq)); - - for (auto const& [_, acceptedLedgerTx] : aLedger->getMap()) - { - (void)_; - uint256 transactionID = acceptedLedgerTx->getTransactionID(); - - std::string const txnId(to_string(transactionID)); - std::string const txnSeq( - std::to_string(acceptedLedgerTx->getTxnSeq())); - - *db << boost::str(deleteAcctTrans % transactionID); - - auto const& accts = acceptedLedgerTx->getAffected(); - - if (!accts.empty()) - { - std::string sql( - "INSERT INTO AccountTransactions " - "(TransID, Account, LedgerSeq, TxnSeq) VALUES "); - - // Try to make an educated guess on how much space we'll - // need for our arguments. 
In argument order we have: 64 - // + 34 + 10 + 10 = 118 + 10 extra = 128 bytes - sql.reserve(sql.length() + (accts.size() * 128)); - - bool first = true; - for (auto const& account : accts) - { - if (!first) - sql += ", ('"; - else - { - sql += "('"; - first = false; - } - - sql += txnId; - sql += "','"; - sql += app.accountIDCache().toBase58(account); - sql += "',"; - sql += ledgerSeq; - sql += ","; - sql += txnSeq; - sql += ")"; - } - sql += ";"; - JLOG(j.trace()) << "ActTx: " << sql; - *db << sql; - } - else - { - JLOG(j.warn()) << "Transaction in ledger " << seq - << " affects no accounts"; - JLOG(j.warn()) << acceptedLedgerTx->getTxn()->getJson( - JsonOptions::none); - } - - *db - << (STTx::getMetaSQLInsertReplaceHeader() + - acceptedLedgerTx->getTxn()->getMetaSQL( - seq, acceptedLedgerTx->getEscMeta()) + - ";"); - - app.getMasterTransaction().inLedger(transactionID, seq); - } - - tr.commit(); - } - - { - static std::string addLedger( - R"sql(INSERT OR REPLACE INTO Ledgers - (LedgerHash,LedgerSeq,PrevHash,TotalCoins,ClosingTime,PrevClosingTime, - CloseTimeRes,CloseFlags,AccountSetHash,TransSetHash) - VALUES - (:ledgerHash,:ledgerSeq,:prevHash,:totalCoins,:closingTime,:prevClosingTime, - :closeTimeRes,:closeFlags,:accountSetHash,:transSetHash);)sql"); - - auto db(app.getLedgerDB().checkoutDb()); - - soci::transaction tr(*db); - - auto const hash = to_string(ledger->info().hash); - auto const parentHash = to_string(ledger->info().parentHash); - auto const drops = to_string(ledger->info().drops); - auto const closeTime = - ledger->info().closeTime.time_since_epoch().count(); - auto const parentCloseTime = - ledger->info().parentCloseTime.time_since_epoch().count(); - auto const closeTimeResolution = - ledger->info().closeTimeResolution.count(); - auto const closeFlags = ledger->info().closeFlags; - auto const accountHash = to_string(ledger->info().accountHash); - auto const txHash = to_string(ledger->info().txHash); - - *db << addLedger, soci::use(hash), soci::use(seq), - soci::use(parentHash), soci::use(drops), soci::use(closeTime), - soci::use(parentCloseTime), soci::use(closeTimeResolution), - soci::use(closeFlags), soci::use(accountHash), - soci::use(txHash); - - tr.commit(); - } - } - else - { - assert(false); - } + auto res = dynamic_cast( + &app.getRelationalDBInterface()) + ->saveValidatedLedger(ledger, current); // Clients can now trust the database for // information about this ledger sequence. app.pendingSaves().finishWork(seq); - return true; + return res; } /** Save, or arrange to save, a fully-validated ledger @@ -1188,75 +1006,16 @@ Ledger::invariants() const //------------------------------------------------------------------------------ /* - * Load a ledger from the database. + * Make ledger using info loaded from database. * - * @param sqlSuffix: Additional string to append to the sql query. - * (typically a where clause). + * @param LedgerInfo: Ledger information. + * @param app: Link to the Application. * @param acquire: Acquire the ledger if not found locally. - * @return The ledger, ledger sequence, and ledger hash. + * @return Shared pointer to the ledger. */ -std::tuple, std::uint32_t, uint256> -loadLedgerHelper(std::string const& sqlSuffix, Application& app, bool acquire) +std::shared_ptr +loadLedgerHelper(LedgerInfo const& info, Application& app, bool acquire) { - uint256 ledgerHash{}; - std::uint32_t ledgerSeq{0}; - - auto db = app.getLedgerDB().checkoutDb(); - - // SOCI requires boost::optional (not std::optional) as parameters. 
- boost::optional sLedgerHash, sPrevHash, sAccountHash, - sTransHash; - boost::optional totDrops, closingTime, prevClosingTime, - closeResolution, closeFlags, ledgerSeq64; - - std::string const sql = - "SELECT " - "LedgerHash, PrevHash, AccountSetHash, TransSetHash, " - "TotalCoins," - "ClosingTime, PrevClosingTime, CloseTimeRes, CloseFlags," - "LedgerSeq from Ledgers " + - sqlSuffix + ";"; - - *db << sql, soci::into(sLedgerHash), soci::into(sPrevHash), - soci::into(sAccountHash), soci::into(sTransHash), soci::into(totDrops), - soci::into(closingTime), soci::into(prevClosingTime), - soci::into(closeResolution), soci::into(closeFlags), - soci::into(ledgerSeq64); - - if (!db->got_data()) - { - auto stream = app.journal("Ledger").debug(); - JLOG(stream) << "Ledger not found: " << sqlSuffix; - return std::make_tuple( - std::shared_ptr(), ledgerSeq, ledgerHash); - } - - ledgerSeq = rangeCheckedCast(ledgerSeq64.value_or(0)); - - uint256 prevHash{}, accountHash{}, transHash{}; - if (sLedgerHash) - (void)ledgerHash.parseHex(*sLedgerHash); - if (sPrevHash) - (void)prevHash.parseHex(*sPrevHash); - if (sAccountHash) - (void)accountHash.parseHex(*sAccountHash); - if (sTransHash) - (void)transHash.parseHex(*sTransHash); - - using time_point = NetClock::time_point; - using duration = NetClock::duration; - - LedgerInfo info; - info.parentHash = prevHash; - info.txHash = transHash; - info.accountHash = accountHash; - info.drops = totDrops.value_or(0); - info.closeTime = time_point{duration{closingTime.value_or(0)}}; - info.parentCloseTime = time_point{duration{prevClosingTime.value_or(0)}}; - info.closeFlags = closeFlags.value_or(0); - info.closeTimeResolution = duration{closeResolution.value_or(0)}; - info.seq = ledgerSeq; - bool loaded; auto ledger = std::make_shared( info, @@ -1269,7 +1028,7 @@ loadLedgerHelper(std::string const& sqlSuffix, Application& app, bool acquire) if (!loaded) ledger.reset(); - return std::make_tuple(ledger, ledgerSeq, ledgerHash); + return ledger; } static void @@ -1288,421 +1047,41 @@ finishLoadByIndexOrHash( ledger->setFull(); } -// Load the ledger info for the specified ledger/s from the database -// @param whichLedger specifies the ledger to load via ledger sequence, ledger -// hash, a range of ledgers, or std::monostate (which loads the most recent) -// @param app Application -// @return vector of LedgerInfos -static std::vector -loadLedgerInfosPostgres( - std::variant< - std::monostate, - uint256, - uint32_t, - std::pair> const& whichLedger, - Application& app) -{ - std::vector infos; -#ifdef RIPPLED_REPORTING - auto log = app.journal("Ledger"); - assert(app.config().reporting()); - std::stringstream sql; - sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, " - "total_coins, closing_time, prev_closing_time, close_time_res, " - "close_flags, ledger_seq FROM ledgers "; - - uint32_t expNumResults = 1; - - if (auto ledgerSeq = std::get_if(&whichLedger)) - { - sql << "WHERE ledger_seq = " + std::to_string(*ledgerSeq); - } - else if (auto ledgerHash = std::get_if(&whichLedger)) - { - sql << ("WHERE ledger_hash = \'\\x" + strHex(*ledgerHash) + "\'"); - } - else if ( - auto minAndMax = - std::get_if>(&whichLedger)) - { - expNumResults = minAndMax->second - minAndMax->first; - - sql - << ("WHERE ledger_seq >= " + std::to_string(minAndMax->first) + - " AND ledger_seq <= " + std::to_string(minAndMax->second)); - } - else - { - sql << ("ORDER BY ledger_seq desc LIMIT 1"); - } - sql << ";"; - - JLOG(log.trace()) << __func__ << " : sql = " << sql.str(); - - auto 
res = PgQuery(app.getPgPool())(sql.str().data()); - if (!res) - { - JLOG(log.error()) << __func__ << " : Postgres response is null - sql = " - << sql.str(); - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - sql = " << sql.str(); - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. sql = " << sql.str(); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 10) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 10, but got " - << res.nfields() << " . sql = " << sql.str(); - assert(false); - return {}; - } - } - - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* hash = res.c_str(i, 0); - char const* prevHash = res.c_str(i, 1); - char const* accountHash = res.c_str(i, 2); - char const* txHash = res.c_str(i, 3); - std::int64_t totalCoins = res.asBigInt(i, 4); - std::int64_t closeTime = res.asBigInt(i, 5); - std::int64_t parentCloseTime = res.asBigInt(i, 6); - std::int64_t closeTimeRes = res.asBigInt(i, 7); - std::int64_t closeFlags = res.asBigInt(i, 8); - std::int64_t ledgerSeq = res.asBigInt(i, 9); - - JLOG(log.trace()) << __func__ << " - Postgres response = " << hash - << " , " << prevHash << " , " << accountHash << " , " - << txHash << " , " << totalCoins << ", " << closeTime - << ", " << parentCloseTime << ", " << closeTimeRes - << ", " << closeFlags << ", " << ledgerSeq - << " - sql = " << sql.str(); - JLOG(log.debug()) << __func__ - << " - Successfully fetched ledger with sequence = " - << ledgerSeq << " from Postgres"; - - using time_point = NetClock::time_point; - using duration = NetClock::duration; - - LedgerInfo info; - if (!info.parentHash.parseHex(prevHash + 2)) - assert(false); - if (!info.txHash.parseHex(txHash + 2)) - assert(false); - if (!info.accountHash.parseHex(accountHash + 2)) - assert(false); - info.drops = totalCoins; - info.closeTime = time_point{duration{closeTime}}; - info.parentCloseTime = time_point{duration{parentCloseTime}}; - info.closeFlags = closeFlags; - info.closeTimeResolution = duration{closeTimeRes}; - info.seq = ledgerSeq; - if (!info.hash.parseHex(hash + 2)) - assert(false); - info.validated = true; - infos.push_back(info); - } - -#endif - return infos; -} - -// Load a ledger from Postgres -// @param whichLedger specifies sequence or hash of ledger. 
Passing -// std::monostate loads the most recent ledger -// @param app the Application -// @return tuple of (ledger, sequence, hash) -static std::tuple, std::uint32_t, uint256> -loadLedgerHelperPostgres( - std::variant const& whichLedger, - Application& app) -{ - std::vector infos; - std::visit( - [&infos, &app](auto&& arg) { - infos = loadLedgerInfosPostgres(arg, app); - }, - whichLedger); - assert(infos.size() <= 1); - if (!infos.size()) - return std::make_tuple(std::shared_ptr(), 0, uint256{}); - LedgerInfo info = infos[0]; - bool loaded; - auto ledger = std::make_shared( - info, - loaded, - false, - app.config(), - app.getNodeFamily(), - app.journal("Ledger")); - - if (!loaded) - ledger.reset(); - - return std::make_tuple(ledger, info.seq, info.hash); -} - std::tuple, std::uint32_t, uint256> getLatestLedger(Application& app) { - if (app.config().reporting()) - return loadLedgerHelperPostgres({}, app); - else - return loadLedgerHelper("order by LedgerSeq desc limit 1", app); -} - -// Load a ledger by index (AKA sequence) from Postgres -// @param ledgerIndex the ledger index (or sequence) to load -// @param app reference to Application -// @return the loaded ledger -static std::shared_ptr -loadByIndexPostgres(std::uint32_t ledgerIndex, Application& app) -{ - std::shared_ptr ledger; - std::tie(ledger, std::ignore, std::ignore) = - loadLedgerHelperPostgres(uint32_t{ledgerIndex}, app); - finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger")); - return ledger; -} - -// Load a ledger by hash from Postgres -// @param hash hash of the ledger to load -// @param app reference to Application -// @return the loaded ledger -static std::shared_ptr -loadByHashPostgres(uint256 const& ledgerHash, Application& app) -{ - std::shared_ptr ledger; - std::tie(ledger, std::ignore, std::ignore) = - loadLedgerHelperPostgres(uint256{ledgerHash}, app); - - finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger")); - - assert(!ledger || ledger->info().hash == ledgerHash); - - return ledger; -} - -// Given a ledger sequence, return the ledger hash -// @param ledgerIndex ledger sequence -// @param app Application -// @return hash of ledger -static uint256 -getHashByIndexPostgres(std::uint32_t ledgerIndex, Application& app) -{ - auto infos = loadLedgerInfosPostgres(ledgerIndex, app); - assert(infos.size() <= 1); - if (infos.size()) - return infos[0].hash; - return {}; -} - -// Given a ledger sequence, return the ledger hash and the parent hash -// @param ledgerIndex ledger sequence -// @param[out] ledgerHash hash of ledger -// @param[out] parentHash hash of parent ledger -// @param app Application -// @return true if the data was found -static bool -getHashesByIndexPostgres( - std::uint32_t ledgerIndex, - uint256& ledgerHash, - uint256& parentHash, - Application& app) -{ - auto infos = loadLedgerInfosPostgres(ledgerIndex, app); - assert(infos.size() <= 1); - if (infos.size()) - { - ledgerHash = infos[0].hash; - parentHash = infos[0].parentHash; - return true; - } - return false; -} - -// Given a contiguous range of sequences, return a map of -// sequence -> (hash, parent hash) -// @param minSeq lower bound of range -// @param maxSeq upper bound of range -// @param app Application -// @return mapping of all found ledger sequences to their hash and parent hash -static std::map> -getHashesByIndexPostgres( - std::uint32_t minSeq, - std::uint32_t maxSeq, - Application& app) -{ - std::map> ret; - auto infos = loadLedgerInfosPostgres(std::make_pair(minSeq, maxSeq), app); - for (auto& info : infos) - 
-    {
-        ret[info.seq] = std::make_pair(info.hash, info.parentHash);
-    }
-    return ret;
+    const std::optional<LedgerInfo> info =
+        app.getRelationalDBInterface().getNewestLedgerInfo();
+    if (!info)
+        return {std::shared_ptr<Ledger>(), {}, {}};
+    return {loadLedgerHelper(*info, app, true), info->seq, info->hash};
 }
 
 std::shared_ptr<Ledger>
 loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire)
 {
-    if (app.config().reporting())
-        return loadByIndexPostgres(ledgerIndex, app);
-    std::shared_ptr<Ledger> ledger;
+    if (std::optional<LedgerInfo> info =
+            app.getRelationalDBInterface().getLedgerInfoByIndex(ledgerIndex))
     {
-        std::ostringstream s;
-        s << "WHERE LedgerSeq = " << ledgerIndex;
-        std::tie(ledger, std::ignore, std::ignore) =
-            loadLedgerHelper(s.str(), app, acquire);
+        std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
+        finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
+        return ledger;
     }
-
-    finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
-    return ledger;
+    return {};
 }
 
 std::shared_ptr<Ledger>
 loadByHash(uint256 const& ledgerHash, Application& app, bool acquire)
 {
-    if (app.config().reporting())
-        return loadByHashPostgres(ledgerHash, app);
-    std::shared_ptr<Ledger> ledger;
+    if (std::optional<LedgerInfo> info =
+            app.getRelationalDBInterface().getLedgerInfoByHash(ledgerHash))
     {
-        std::ostringstream s;
-        s << "WHERE LedgerHash = '" << ledgerHash << "'";
-        std::tie(ledger, std::ignore, std::ignore) =
-            loadLedgerHelper(s.str(), app, acquire);
+        std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
+        finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
+        assert(!ledger || ledger->info().hash == ledgerHash);
+        return ledger;
     }
-
-    finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
-
-    assert(!ledger || ledger->info().hash == ledgerHash);
-
-    return ledger;
-}
-
-uint256
-getHashByIndex(std::uint32_t ledgerIndex, Application& app)
-{
-    if (app.config().reporting())
-        return getHashByIndexPostgres(ledgerIndex, app);
-    uint256 ret;
-
-    std::string sql =
-        "SELECT LedgerHash FROM Ledgers INDEXED BY SeqLedger WHERE LedgerSeq='";
-    sql.append(std::to_string(ledgerIndex));
-    sql.append("';");
-
-    std::string hash;
-    {
-        auto db = app.getLedgerDB().checkoutDb();
-
-        // SOCI requires boost::optional (not std::optional) as the parameter.
-        boost::optional<std::string> lh;
-        *db << sql, soci::into(lh);
-
-        if (!db->got_data() || !lh)
-            return ret;
-
-        hash = *lh;
-        if (hash.empty())
-            return ret;
-    }
-
-    (void)ret.parseHex(hash);
-    return ret;
-}
-
-bool
-getHashesByIndex(
-    std::uint32_t ledgerIndex,
-    uint256& ledgerHash,
-    uint256& parentHash,
-    Application& app)
-{
-    if (app.config().reporting())
-        return getHashesByIndexPostgres(
-            ledgerIndex, ledgerHash, parentHash, app);
-    auto db = app.getLedgerDB().checkoutDb();
-
-    // SOCI requires boost::optional (not std::optional) as parameters.
- boost::optional lhO, phO; - - *db << "SELECT LedgerHash,PrevHash FROM Ledgers " - "INDEXED BY SeqLedger Where LedgerSeq = :ls;", - soci::into(lhO), soci::into(phO), soci::use(ledgerIndex); - - if (!lhO || !phO) - { - auto stream = app.journal("Ledger").trace(); - JLOG(stream) << "Don't have ledger " << ledgerIndex; - return false; - } - - return ledgerHash.parseHex(*lhO) && parentHash.parseHex(*phO); -} - -std::map> -getHashesByIndex(std::uint32_t minSeq, std::uint32_t maxSeq, Application& app) -{ - if (app.config().reporting()) - return getHashesByIndexPostgres(minSeq, maxSeq, app); - std::map> ret; - - std::string sql = - "SELECT LedgerSeq,LedgerHash,PrevHash FROM Ledgers WHERE LedgerSeq >= "; - sql.append(std::to_string(minSeq)); - sql.append(" AND LedgerSeq <= "); - sql.append(std::to_string(maxSeq)); - sql.append(";"); - - auto db = app.getLedgerDB().checkoutDb(); - - std::uint64_t ls; - std::string lh; - - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional ph; - soci::statement st = - (db->prepare << sql, soci::into(ls), soci::into(lh), soci::into(ph)); - - st.execute(); - while (st.fetch()) - { - std::pair& hashes = - ret[rangeCheckedCast(ls)]; - (void)hashes.first.parseHex(lh); - if (ph) - (void)hashes.second.parseHex(*ph); - else - hashes.second.zero(); - if (!ph) - { - auto stream = app.journal("Ledger").warn(); - JLOG(stream) << "Null prev hash for ledger seq: " << ls; - } - } - - return ret; + return {}; } std::vector< @@ -1783,69 +1162,10 @@ flatFetchTransactions(ReadView const& ledger, Application& app) assert(false); return {}; } - std::vector nodestoreHashes; -#ifdef RIPPLED_REPORTING - auto log = app.journal("Ledger"); - - std::string query = - "SELECT nodestore_hash" - " FROM transactions " - " WHERE ledger_seq = " + - std::to_string(ledger.info().seq); - auto res = PgQuery(app.getPgPool())(query.c_str()); - - if (!res) - { - JLOG(log.error()) << __func__ - << " : Postgres response is null - query = " << query; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - query = " << query; - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. query = " << query; - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 1) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . 
query = " << query; - assert(false); - return {}; - } - } - - JLOG(log.trace()) << __func__ << " : result = " << res.c_str() - << " : query = " << query; - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* nodestoreHash = res.c_str(i, 0); - uint256 hash; - if (!hash.parseHex(nodestoreHash + 2)) - assert(false); - - nodestoreHashes.push_back(hash); - } -#endif + auto nodestoreHashes = dynamic_cast( + &app.getRelationalDBInterface()) + ->getTxHashes(ledger.info().seq); return flatFetchTransactions(app, nodestoreHashes); } diff --git a/src/ripple/app/ledger/Ledger.h b/src/ripple/app/ledger/Ledger.h index 0467beac52..e614c6f27e 100644 --- a/src/ripple/app/ledger/Ledger.h +++ b/src/ripple/app/ledger/Ledger.h @@ -431,31 +431,15 @@ pendSaveValidated( bool isSynchronous, bool isCurrent); -extern std::shared_ptr +std::shared_ptr +loadLedgerHelper(LedgerInfo const& sinfo, Application& app, bool acquire); + +std::shared_ptr loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire = true); -extern std::tuple, std::uint32_t, uint256> -loadLedgerHelper( - std::string const& sqlSuffix, - Application& app, - bool acquire = true); - -extern std::shared_ptr +std::shared_ptr loadByHash(uint256 const& ledgerHash, Application& app, bool acquire = true); -extern uint256 -getHashByIndex(std::uint32_t index, Application& app); - -extern bool -getHashesByIndex( - std::uint32_t index, - uint256& ledgerHash, - uint256& parentHash, - Application& app); - -extern std::map> -getHashesByIndex(std::uint32_t minSeq, std::uint32_t maxSeq, Application& app); - // Fetch the ledger with the highest sequence contained in the database extern std::tuple, std::uint32_t, uint256> getLatestLedger(Application& app); diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 864f5b1b74..e3c54e8036 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -34,6 +34,8 @@ #include #include #include +#include +#include #include #include #include @@ -275,15 +277,9 @@ LedgerMaster::getValidatedLedgerAge() #ifdef RIPPLED_REPORTING if (app_.config().reporting()) - { - auto age = PgQuery(app_.getPgPool())("SELECT age()"); - if (!age || age.isNull()) - { - JLOG(m_journal.debug()) << "No ledgers in database"; - return weeks{2}; - } - return std::chrono::seconds{age.asInt()}; - } + return dynamic_cast( + &app_.getRelationalDBInterface()) + ->getValidatedLedgerAge(); #endif std::chrono::seconds valClose{mValidLedgerSign.load()}; if (valClose == 0s) @@ -711,7 +707,7 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr ledger) std::uint32_t seq = ledger->info().seq; uint256 prevHash = ledger->info().parentHash; - std::map> ledgerHashes; + std::map ledgerHashes; std::uint32_t minHas = seq; std::uint32_t maxHas = seq; @@ -740,15 +736,15 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr ledger) mCompleteLedgers.insert(range(minHas, maxHas)); } maxHas = minHas; - ledgerHashes = - getHashesByIndex((seq < 500) ? 0 : (seq - 499), seq, app_); + ledgerHashes = app_.getRelationalDBInterface().getHashesByIndex( + (seq < 500) ? 
0 : (seq - 499), seq); it = ledgerHashes.find(seq); if (it == ledgerHashes.end()) break; if (!nodeStore.fetchNodeObject( - ledgerHashes.begin()->second.first, + ledgerHashes.begin()->second.ledgerHash, ledgerHashes.begin()->first)) { // The ledger is not backed by the node store @@ -758,10 +754,10 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr ledger) } } - if (it->second.first != prevHash) + if (it->second.ledgerHash != prevHash) break; - prevHash = it->second.second; + prevHash = it->second.parentHash; } { @@ -924,7 +920,8 @@ LedgerMaster::setFullLedger( { // Check the SQL database's entry for the sequence before this // ledger, if it's not this ledger's parent, invalidate it - uint256 prevHash = getHashByIndex(ledger->info().seq - 1, app_); + uint256 prevHash = app_.getRelationalDBInterface().getHashByIndex( + ledger->info().seq - 1); if (prevHash.isNonZero() && prevHash != ledger->info().parentHash) clearLedger(ledger->info().seq - 1); } @@ -1624,10 +1621,10 @@ LedgerMaster::getValidatedLedger() #ifdef RIPPLED_REPORTING if (app_.config().reporting()) { - auto seq = PgQuery(app_.getPgPool())("SELECT max_ledger()"); - if (!seq || seq.isNull()) + auto seq = app_.getRelationalDBInterface().getMaxLedgerSeq(); + if (!seq) return {}; - return getLedgerBySeq(seq.asInt()); + return getLedgerBySeq(*seq); } #endif return mValidLedger.get(); @@ -1660,13 +1657,9 @@ LedgerMaster::getCompleteLedgers() { #ifdef RIPPLED_REPORTING if (app_.config().reporting()) - { - auto range = PgQuery(app_.getPgPool())("SELECT complete_ledgers()"); - if (!range) - return "error"; - return range.c_str(); - } - + return dynamic_cast( + &app_.getRelationalDBInterface()) + ->getCompleteLedgers(); #endif std::lock_guard sl(mCompleteLock); return to_string(mCompleteLedgers); @@ -1710,7 +1703,7 @@ LedgerMaster::getHashBySeq(std::uint32_t index) if (hash.isNonZero()) return hash; - return getHashByIndex(index, app_); + return app_.getRelationalDBInterface().getHashByIndex(index); } std::optional @@ -1949,7 +1942,8 @@ LedgerMaster::fetchForHistory( fillInProgress = mFillInProgress; } if (fillInProgress == 0 && - getHashByIndex(seq - 1, app_) == ledger->info().parentHash) + app_.getRelationalDBInterface().getHashByIndex(seq - 1) == + ledger->info().parentHash) { { // Previous ledger is in DB @@ -2344,30 +2338,7 @@ LedgerMaster::getFetchPackCacheSize() const std::optional LedgerMaster::minSqlSeq() { - if (!app_.config().reporting()) - { - // SOCI requires boost::optional (not std::optional) as the parameter. 
- boost::optional seq; - auto db = app_.getLedgerDB().checkoutDb(); - *db << "SELECT MIN(LedgerSeq) FROM Ledgers", soci::into(seq); - if (seq) - return *seq; - } -#ifdef RIPPLED_REPORTING - { - auto seq = PgQuery(app_.getPgPool())("SELECT min_ledger()"); - if (!seq) - { - JLOG(m_journal.error()) - << "Error querying minimum ledger sequence."; - return {}; - } - if (seq.isNull()) - return {}; - return seq.asInt(); - } -#endif - return {}; + return app_.getRelationalDBInterface().getMinLedgerSeq(); } } // namespace ripple diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index ec9acd3472..ad349e2806 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -44,6 +44,8 @@ #include #include #include +#include +#include #include #include #include @@ -53,7 +55,6 @@ #include #include #include -#include #include #include #include @@ -169,9 +170,6 @@ public: // Required by the SHAMapStore TransactionMaster m_txMaster; -#ifdef RIPPLED_REPORTING - std::shared_ptr pgPool_; -#endif NodeStoreScheduler m_nodeStoreScheduler; std::unique_ptr m_shaMapStore; PendingSaves pendingSaves_; @@ -221,8 +219,7 @@ public: boost::asio::steady_timer entropyTimer_; bool startTimers_; - std::unique_ptr mTxnDB; - std::unique_ptr mLedgerDB; + std::unique_ptr mRelationalDBInterface; std::unique_ptr mWalletDB; std::unique_ptr overlay_; std::vector> websocketServers_; @@ -285,14 +282,6 @@ public: [this]() { signalStop(); })) , m_txMaster(*this) -#ifdef RIPPLED_REPORTING - , pgPool_( - config_->reporting() ? make_PgPool( - config_->section("ledger_tx_tables"), - *this, - logs_->journal("PgPool")) - : nullptr) -#endif , m_nodeStoreScheduler(*this) , m_shaMapStore(make_SHAMapStore( @@ -860,27 +849,12 @@ public: return *txQ_; } - DatabaseCon& - getTxnDB() override + RelationalDBInterface& + getRelationalDBInterface() override { - assert(mTxnDB.get() != nullptr); - return *mTxnDB; + assert(mRelationalDBInterface.get() != nullptr); + return *mRelationalDBInterface; } - DatabaseCon& - getLedgerDB() override - { - assert(mLedgerDB.get() != nullptr); - return *mLedgerDB; - } - -#ifdef RIPPLED_REPORTING - std::shared_ptr const& - getPgPool() override - { - assert(pgPool_); - return pgPool_; - } -#endif DatabaseCon& getWalletDB() override @@ -907,85 +881,18 @@ public: bool initRDBMS() { - assert(mTxnDB.get() == nullptr); - assert(mLedgerDB.get() == nullptr); assert(mWalletDB.get() == nullptr); try { - auto setup = setup_DatabaseCon(*config_, m_journal); - if (!config_->reporting()) - { - if (config_->useTxTables()) - { - // transaction database - mTxnDB = std::make_unique( - setup, - TxDBName, - TxDBPragma, - TxDBInit, - DatabaseCon::CheckpointerSetup{ - m_jobQueue.get(), &logs()}); - mTxnDB->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config_->getValueFor(SizedItem::txnDBCache))); - if (!setup.standAlone || setup.startUp == Config::LOAD || - setup.startUp == Config::LOAD_FILE || - setup.startUp == Config::REPLAY) - { - // Check if AccountTransactions has primary key - std::string cid, name, type; - std::size_t notnull, dflt_value, pk; - soci::indicator ind; - soci::statement st = - (mTxnDB->getSession().prepare - << ("PRAGMA table_info(AccountTransactions);"), - soci::into(cid), - soci::into(name), - soci::into(type), - soci::into(notnull), - soci::into(dflt_value, ind), - soci::into(pk)); - - st.execute(); - while (st.fetch()) - { - if (pk == 1) - { - JLOG(m_journal.fatal()) - << "AccountTransactions database " - "should 
not have a primary key"; - return false; - } - } - } - } - - // ledger database - mLedgerDB = std::make_unique( - setup, - LgrDBName, - LgrDBPragma, - LgrDBInit, - DatabaseCon::CheckpointerSetup{m_jobQueue.get(), &logs()}); - mLedgerDB->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config_->getValueFor(SizedItem::lgrDBCache))); - } - else if (!config_->reportingReadOnly()) // use pg - { -#ifdef RIPPLED_REPORTING - initSchema(pgPool_); -#endif - } + mRelationalDBInterface = + RelationalDBInterface::init(*this, *config_, *m_jobQueue); // wallet database + auto setup = setup_DatabaseCon(*config_, m_journal); setup.useGlobalPragma = false; - mWalletDB = std::make_unique( - setup, - WalletDBName, - std::array(), - WalletDBInit); + + mWalletDB = makeWalletDB(setup); } catch (std::exception const& e) { @@ -1206,71 +1113,10 @@ public: void doSweep() { - if (!config_->standalone()) + if (!config_->standalone() && + !getRelationalDBInterface().transactionDbHasSpace(*config_)) { - boost::filesystem::space_info space = - boost::filesystem::space(config_->legacy("database_path")); - - if (space.available < megabytes(512)) - { - JLOG(m_journal.fatal()) - << "Remaining free disk space is less than 512MB"; - signalStop(); - } - - if (!config_->reporting() && config_->useTxTables()) - { - DatabaseCon::Setup dbSetup = setup_DatabaseCon(*config_); - boost::filesystem::path dbPath = dbSetup.dataDir / TxDBName; - boost::system::error_code ec; - std::optional dbSize = - boost::filesystem::file_size(dbPath, ec); - if (ec) - { - JLOG(m_journal.error()) - << "Error checking transaction db file size: " - << ec.message(); - dbSize.reset(); - } - - auto db = mTxnDB->checkoutDb(); - static auto const pageSize = [&] { - std::uint32_t ps; - *db << "PRAGMA page_size;", soci::into(ps); - return ps; - }(); - static auto const maxPages = [&] { - std::uint32_t mp; - *db << "PRAGMA max_page_count;", soci::into(mp); - return mp; - }(); - std::uint32_t pageCount; - *db << "PRAGMA page_count;", soci::into(pageCount); - std::uint32_t freePages = maxPages - pageCount; - std::uint64_t freeSpace = - safe_cast(freePages) * pageSize; - JLOG(m_journal.info()) - << "Transaction DB pathname: " << dbPath.string() - << "; file size: " << dbSize.value_or(-1) << " bytes" - << "; SQLite page size: " << pageSize << " bytes" - << "; Free pages: " << freePages - << "; Free space: " << freeSpace << " bytes; " - << "Note that this does not take into account available " - "disk " - "space."; - - if (freeSpace < megabytes(512)) - { - JLOG(m_journal.fatal()) - << "Free SQLite space for transaction db is less than " - "512MB. To fix this, rippled must be executed with " - "the " - "vacuum parameter before restarting. " - "Note that this activity can take multiple days, " - "depending on database size."; - signalStop(); - } - } + signalStop(); } // VFALCO NOTE Does the order of calls matter? @@ -1294,7 +1140,9 @@ public: #ifdef RIPPLED_REPORTING if (config().reporting()) - pgPool_->idleSweeper(); + dynamic_cast( + &*mRelationalDBInterface) + ->sweep(); #endif // Set timer to do another sweep later. 
@@ -1359,8 +1207,6 @@ ApplicationImp::setup() signalStop(); }); - assert(mTxnDB == nullptr); - auto debug_log = config_->getDebugLogFile(); if (!debug_log.empty()) @@ -1485,7 +1331,7 @@ ApplicationImp::setup() if (!config().reporting()) m_orderBookDB.setup(getLedgerMaster().getCurrentLedger()); - nodeIdentity_ = loadNodeIdentity(*this); + nodeIdentity_ = getNodeIdentity(*this); if (!cluster_->load(config().section(SECTION_CLUSTER_NODES))) { @@ -2271,25 +2117,9 @@ ApplicationImp::nodeToShards() void ApplicationImp::setMaxDisallowedLedger() { - if (config().reporting()) - { -#ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool_)("SELECT max_ledger()"); - if (seq && !seq.isNull()) - maxDisallowedLedger_ = seq.asBigInt(); -#endif - } - else - { - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional seq; - { - auto db = getLedgerDB().checkoutDb(); - *db << "SELECT MAX(LedgerSeq) FROM Ledgers;", soci::into(seq); - } - if (seq) - maxDisallowedLedger_ = *seq; - } + auto seq = getRelationalDBInterface().getMaxLedgerSeq(); + if (seq) + maxDisallowedLedger_ = *seq; JLOG(m_journal.trace()) << "Max persisted ledger is " << maxDisallowedLedger_; diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index c88bb39d84..f0d00b0815 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -74,7 +74,6 @@ class OrderBookDB; class Overlay; class PathRequests; class PendingSaves; -class PgPool; class PublicKey; class SecretKey; class AccountIDCache; @@ -87,6 +86,7 @@ class ValidatorList; class ValidatorSite; class Cluster; +class RelationalDBInterface; class DatabaseCon; class SHAMapStore; @@ -236,10 +236,8 @@ public: openLedger() = 0; virtual OpenLedger const& openLedger() const = 0; - virtual DatabaseCon& - getTxnDB() = 0; - virtual DatabaseCon& - getLedgerDB() = 0; + virtual RelationalDBInterface& + getRelationalDBInterface() = 0; virtual std::chrono::milliseconds getIOLatency() = 0; @@ -250,11 +248,6 @@ public: virtual bool serverOkay(std::string& reason) = 0; -#ifdef RIPPLED_REPORTING - virtual std::shared_ptr const& - getPgPool() = 0; -#endif - virtual beast::Journal journal(std::string const& name) = 0; diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index 377d6c2567..815d342d06 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -26,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -548,48 +548,11 @@ run(int argc, char** argv) return -1; } - using namespace boost::filesystem; - DatabaseCon::Setup const dbSetup = setup_DatabaseCon(*config); - path dbPath = dbSetup.dataDir / TxDBName; - try { - uintmax_t const dbSize = file_size(dbPath); - assert(dbSize != static_cast(-1)); - - if (auto available = space(dbPath.parent_path()).available; - available < dbSize) - { - std::cerr << "The database filesystem must have at least as " - "much free space as the size of " - << dbPath.string() << ", which is " << dbSize - << " bytes. Only " << available - << " bytes are available.\n"; + auto setup = setup_DatabaseCon(*config); + if (!doVacuumDB(setup)) return -1; - } - - auto txnDB = std::make_unique( - dbSetup, TxDBName, TxDBPragma, TxDBInit); - auto& session = txnDB->getSession(); - std::uint32_t pageSize; - - // Only the most trivial databases will fit in memory on typical - // (recommended) software. 
Force temp files to be written to disk - // regardless of the config settings. - session << boost::format(CommonDBPragmaTemp) % "file"; - session << "PRAGMA page_size;", soci::into(pageSize); - - std::cout << "VACUUM beginning. page_size: " << pageSize - << std::endl; - - session << "VACUUM;"; - assert(dbSetup.globalPragma); - for (auto const& p : *dbSetup.globalPragma) - session << p; - session << "PRAGMA page_size;", soci::into(pageSize); - - std::cout << "VACUUM finished. page_size: " << pageSize - << std::endl; } catch (std::exception const& e) { diff --git a/src/ripple/app/main/NodeIdentity.cpp b/src/ripple/app/main/NodeIdentity.cpp index 5e1d12c0d6..5f7cca7a59 100644 --- a/src/ripple/app/main/NodeIdentity.cpp +++ b/src/ripple/app/main/NodeIdentity.cpp @@ -19,17 +19,17 @@ #include #include +#include #include #include #include -#include #include #include namespace ripple { std::pair -loadNodeIdentity(Application& app) +getNodeIdentity(Application& app) { // If a seed is specified in the configuration file use that directly. if (app.config().exists(SECTION_NODE_SEED)) @@ -47,49 +47,8 @@ loadNodeIdentity(Application& app) return {publicKey, secretKey}; } - // Try to load a node identity from the database: - std::optional publicKey; - std::optional secretKey; - auto db = app.getWalletDB().checkoutDb(); - - { - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional pubKO, priKO; - soci::statement st = - (db->prepare << "SELECT PublicKey, PrivateKey FROM NodeIdentity;", - soci::into(pubKO), - soci::into(priKO)); - st.execute(); - while (st.fetch()) - { - auto const sk = parseBase58( - TokenType::NodePrivate, priKO.value_or("")); - auto const pk = parseBase58( - TokenType::NodePublic, pubKO.value_or("")); - - // Only use if the public and secret keys are a pair - if (sk && pk && (*pk == derivePublicKey(KeyType::secp256k1, *sk))) - { - secretKey = sk; - publicKey = pk; - } - } - } - - // If a valid identity wasn't found, we randomly generate a new one: - if (!publicKey || !secretKey) - { - std::tie(publicKey, secretKey) = randomKeyPair(KeyType::secp256k1); - - *db << str( - boost::format("INSERT INTO NodeIdentity (PublicKey,PrivateKey) " - "VALUES ('%s','%s');") % - toBase58(TokenType::NodePublic, *publicKey) % - toBase58(TokenType::NodePrivate, *secretKey)); - } - - return {*publicKey, *secretKey}; + return getNodeIdentity(*db); } } // namespace ripple diff --git a/src/ripple/app/main/NodeIdentity.h b/src/ripple/app/main/NodeIdentity.h index 4d4a520155..60deeed856 100644 --- a/src/ripple/app/main/NodeIdentity.h +++ b/src/ripple/app/main/NodeIdentity.h @@ -29,7 +29,7 @@ namespace ripple { /** The cryptographic credentials identifying this server instance. */ std::pair -loadNodeIdentity(Application& app); +getNodeIdentity(Application& app); } // namespace ripple diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 9ce8e4156a..9ed4895f14 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -453,66 +454,6 @@ public: std::size_t getLocalTxCount() override; - // Helper function to generate SQL query to get transactions. - std::string - transactionsSQL( - std::string selection, - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool binary, - bool count, - bool bUnlimited); - - // Client information retrieval functions. 
- using NetworkOPs::AccountTxMarker; - using NetworkOPs::AccountTxs; - AccountTxs - getAccountTxs( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool bUnlimited) override; - - AccountTxs - getTxsAccount( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bUnlimited) override; - - using NetworkOPs::MetaTxsList; - using NetworkOPs::txnMetaLedgerType; - - MetaTxsList - getAccountTxsB( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool bUnlimited) override; - - MetaTxsList - getTxsAccountB( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bUnlimited) override; - // // Monitoring: publisher side. // @@ -2204,306 +2145,6 @@ NetworkOPsImp::setMode(OperatingMode om) pubServer(); } -std::string -NetworkOPsImp::transactionsSQL( - std::string selection, - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool binary, - bool count, - bool bUnlimited) -{ - std::uint32_t NONBINARY_PAGE_LENGTH = 200; - std::uint32_t BINARY_PAGE_LENGTH = 500; - - std::uint32_t numberOfResults; - - if (count) - { - numberOfResults = 1000000000; - } - else if (limit < 0) - { - numberOfResults = binary ? BINARY_PAGE_LENGTH : NONBINARY_PAGE_LENGTH; - } - else if (!bUnlimited) - { - numberOfResults = std::min( - binary ? BINARY_PAGE_LENGTH : NONBINARY_PAGE_LENGTH, - static_cast(limit)); - } - else - { - numberOfResults = limit; - } - - std::string maxClause = ""; - std::string minClause = ""; - - if (maxLedger != -1) - { - maxClause = boost::str( - boost::format("AND AccountTransactions.LedgerSeq <= '%u'") % - maxLedger); - } - - if (minLedger != -1) - { - minClause = boost::str( - boost::format("AND AccountTransactions.LedgerSeq >= '%u'") % - minLedger); - } - - std::string sql; - - if (count) - sql = boost::str( - boost::format("SELECT %s FROM AccountTransactions " - "WHERE Account = '%s' %s %s LIMIT %u, %u;") % - selection % app_.accountIDCache().toBase58(account) % maxClause % - minClause % offset % numberOfResults); - else - sql = boost::str( - boost::format( - "SELECT %s FROM " - "AccountTransactions INNER JOIN Transactions " - "ON Transactions.TransID = AccountTransactions.TransID " - "WHERE Account = '%s' %s %s " - "ORDER BY AccountTransactions.LedgerSeq %s, " - "AccountTransactions.TxnSeq %s, AccountTransactions.TransID %s " - "LIMIT %u, %u;") % - selection % app_.accountIDCache().toBase58(account) % maxClause % - minClause % (descending ? "DESC" : "ASC") % - (descending ? "DESC" : "ASC") % (descending ? 
"DESC" : "ASC") % - offset % numberOfResults); - JLOG(m_journal.trace()) << "txSQL query: " << sql; - return sql; -} - -NetworkOPs::AccountTxs -NetworkOPsImp::getAccountTxs( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool bUnlimited) -{ - // can be called with no locks - AccountTxs ret; - - std::string sql = transactionsSQL( - "AccountTransactions.LedgerSeq,Status,RawTxn,TxnMeta", - account, - minLedger, - maxLedger, - descending, - offset, - limit, - false, - false, - bUnlimited); - - { - auto db = app_.getTxnDB().checkoutDb(); - - // SOCI requires boost::optional (not std::optional) as parameters. - boost::optional ledgerSeq; - boost::optional status; - soci::blob sociTxnBlob(*db), sociTxnMetaBlob(*db); - soci::indicator rti, tmi; - Blob rawTxn, txnMeta; - - soci::statement st = - (db->prepare << sql, - soci::into(ledgerSeq), - soci::into(status), - soci::into(sociTxnBlob, rti), - soci::into(sociTxnMetaBlob, tmi)); - - st.execute(); - while (st.fetch()) - { - if (soci::i_ok == rti) - convert(sociTxnBlob, rawTxn); - else - rawTxn.clear(); - - if (soci::i_ok == tmi) - convert(sociTxnMetaBlob, txnMeta); - else - txnMeta.clear(); - - auto txn = Transaction::transactionFromSQL( - ledgerSeq, status, rawTxn, app_); - - if (txnMeta.empty()) - { // Work around a bug that could leave the metadata missing - auto const seq = - rangeCheckedCast(ledgerSeq.value_or(0)); - - JLOG(m_journal.warn()) - << "Recovering ledger " << seq << ", txn " << txn->getID(); - - if (auto l = m_ledgerMaster.getLedgerBySeq(seq)) - pendSaveValidated(app_, l, false, false); - } - - if (txn) - ret.emplace_back( - txn, - std::make_shared( - txn->getID(), txn->getLedger(), txnMeta)); - } - } - - return ret; -} - -std::vector -NetworkOPsImp::getAccountTxsB( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool bUnlimited) -{ - // can be called with no locks - std::vector ret; - - std::string sql = transactionsSQL( - "AccountTransactions.LedgerSeq,Status,RawTxn,TxnMeta", - account, - minLedger, - maxLedger, - descending, - offset, - limit, - true /*binary*/, - false, - bUnlimited); - - { - auto db = app_.getTxnDB().checkoutDb(); - - // SOCI requires boost::optional (not std::optional) as parameters. 
- boost::optional ledgerSeq; - boost::optional status; - soci::blob sociTxnBlob(*db), sociTxnMetaBlob(*db); - soci::indicator rti, tmi; - - soci::statement st = - (db->prepare << sql, - soci::into(ledgerSeq), - soci::into(status), - soci::into(sociTxnBlob, rti), - soci::into(sociTxnMetaBlob, tmi)); - - st.execute(); - while (st.fetch()) - { - Blob rawTxn; - if (soci::i_ok == rti) - convert(sociTxnBlob, rawTxn); - Blob txnMeta; - if (soci::i_ok == tmi) - convert(sociTxnMetaBlob, txnMeta); - - auto const seq = - rangeCheckedCast(ledgerSeq.value_or(0)); - - ret.emplace_back(std::move(rawTxn), std::move(txnMeta), seq); - } - } - - return ret; -} - -NetworkOPsImp::AccountTxs -NetworkOPsImp::getTxsAccount( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bUnlimited) -{ - static std::uint32_t const page_length(200); - - Application& app = app_; - NetworkOPsImp::AccountTxs ret; - - auto bound = [&ret, &app]( - std::uint32_t ledger_index, - std::string const& status, - Blob&& rawTxn, - Blob&& rawMeta) { - convertBlobsToTxResult(ret, ledger_index, status, rawTxn, rawMeta, app); - }; - - accountTxPage( - app_.getTxnDB(), - app_.accountIDCache(), - std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1), - bound, - account, - minLedger, - maxLedger, - forward, - marker, - limit, - bUnlimited, - page_length); - - return ret; -} - -NetworkOPsImp::MetaTxsList -NetworkOPsImp::getTxsAccountB( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bUnlimited) -{ - static const std::uint32_t page_length(500); - - MetaTxsList ret; - - auto bound = [&ret]( - std::uint32_t ledgerIndex, - std::string const& status, - Blob&& rawTxn, - Blob&& rawMeta) { - ret.emplace_back(std::move(rawTxn), std::move(rawMeta), ledgerIndex); - }; - - accountTxPage( - app_.getTxnDB(), - app_.accountIDCache(), - std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1), - bound, - account, - minLedger, - maxLedger, - forward, - marker, - limit, - bUnlimited, - page_length); - return ret; -} - bool NetworkOPsImp::recvValidation( std::shared_ptr const& val, diff --git a/src/ripple/app/misc/NetworkOPs.h b/src/ripple/app/misc/NetworkOPs.h index e9643de4fb..5a982e5d15 100644 --- a/src/ripple/app/misc/NetworkOPs.h +++ b/src/ripple/app/misc/NetworkOPs.h @@ -1,4 +1,4 @@ -//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled Copyright (c) 2012, 2013 Ripple Labs Inc. 
@@ -251,60 +251,6 @@ public: virtual std::size_t getLocalTxCount() = 0; - struct AccountTxMarker - { - uint32_t ledgerSeq = 0; - uint32_t txnSeq = 0; - }; - - // client information retrieval functions - using AccountTx = - std::pair, std::shared_ptr>; - using AccountTxs = std::vector; - - virtual AccountTxs - getAccountTxs( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool bUnlimited) = 0; - - virtual AccountTxs - getTxsAccount( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bUnlimited) = 0; - - using txnMetaLedgerType = std::tuple; - using MetaTxsList = std::vector; - - virtual MetaTxsList - getAccountTxsB( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool descending, - std::uint32_t offset, - int limit, - bool bUnlimited) = 0; - - virtual MetaTxsList - getTxsAccountB( - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bUnlimited) = 0; - //-------------------------------------------------------------------------- // // Monitoring: publisher side diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index c5a97b547d..4f70735312 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include #include @@ -34,68 +36,15 @@ SHAMapStoreImp::SavedStateDB::init( std::string const& dbName) { std::lock_guard lock(mutex_); - - open(session_, config, dbName); - - session_ << "PRAGMA synchronous=FULL;"; - - session_ << "CREATE TABLE IF NOT EXISTS DbState (" - " Key INTEGER PRIMARY KEY," - " WritableDb TEXT," - " ArchiveDb TEXT," - " LastRotatedLedger INTEGER" - ");"; - - session_ << "CREATE TABLE IF NOT EXISTS CanDelete (" - " Key INTEGER PRIMARY KEY," - " CanDeleteSeq INTEGER" - ");"; - - std::int64_t count = 0; - { - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional countO; - session_ << "SELECT COUNT(Key) FROM DbState WHERE Key = 1;", - soci::into(countO); - if (!countO) - Throw( - "Failed to fetch Key Count from DbState."); - count = *countO; - } - - if (!count) - { - session_ << "INSERT INTO DbState VALUES (1, '', '', 0);"; - } - - { - // SOCI requires boost::optional (not std::optional) as the parameter. 
- boost::optional countO; - session_ << "SELECT COUNT(Key) FROM CanDelete WHERE Key = 1;", - soci::into(countO); - if (!countO) - Throw( - "Failed to fetch Key Count from CanDelete."); - count = *countO; - } - - if (!count) - { - session_ << "INSERT INTO CanDelete VALUES (1, 0);"; - } + initStateDB(sqlDb_, config, dbName); } LedgerIndex SHAMapStoreImp::SavedStateDB::getCanDelete() { - LedgerIndex seq; std::lock_guard lock(mutex_); - session_ << "SELECT CanDeleteSeq FROM CanDelete WHERE Key = 1;", - soci::into(seq); - ; - - return seq; + return ripple::getCanDelete(sqlDb_); } LedgerIndex @@ -103,47 +52,29 @@ SHAMapStoreImp::SavedStateDB::setCanDelete(LedgerIndex canDelete) { std::lock_guard lock(mutex_); - session_ << "UPDATE CanDelete SET CanDeleteSeq = :canDelete WHERE Key = 1;", - soci::use(canDelete); - - return canDelete; + return ripple::setCanDelete(sqlDb_, canDelete); } -SHAMapStoreImp::SavedState +SavedState SHAMapStoreImp::SavedStateDB::getState() { - SavedState state; - std::lock_guard lock(mutex_); - session_ << "SELECT WritableDb, ArchiveDb, LastRotatedLedger" - " FROM DbState WHERE Key = 1;", - soci::into(state.writableDb), soci::into(state.archiveDb), - soci::into(state.lastRotated); - - return state; + return ripple::getSavedState(sqlDb_); } void SHAMapStoreImp::SavedStateDB::setState(SavedState const& state) { std::lock_guard lock(mutex_); - session_ << "UPDATE DbState" - " SET WritableDb = :writableDb," - " ArchiveDb = :archiveDb," - " LastRotatedLedger = :lastRotated" - " WHERE Key = 1;", - soci::use(state.writableDb), soci::use(state.archiveDb), - soci::use(state.lastRotated); + ripple::setSavedState(sqlDb_, state); } void SHAMapStoreImp::SavedStateDB::setLastRotated(LedgerIndex seq) { std::lock_guard lock(mutex_); - session_ << "UPDATE DbState SET LastRotatedLedger = :seq" - " WHERE Key = 1;", - soci::use(seq); + ripple::setLastRotated(sqlDb_, seq); } //------------------------------------------------------------------------------ @@ -345,9 +276,7 @@ SHAMapStoreImp::run() ledgerMaster_ = &app_.getLedgerMaster(); fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache(0)); treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache(0)); - if (app_.config().useTxTables()) - transactionDb_ = &app_.getTxnDB(); - ledgerDb_ = &app_.getLedgerDB(); + if (advisoryDelete_) canDelete_ = state_db_.getCanDelete(); @@ -596,24 +525,19 @@ SHAMapStoreImp::makeBackendRotating(std::string path) void SHAMapStoreImp::clearSql( - DatabaseCon& database, LedgerIndex lastRotated, - std::string const& minQuery, - std::string const& deleteQuery) + const std::string TableName, + std::function()> const& getMinSeq, + std::function const& deleteBeforeSeq) { assert(deleteInterval_); LedgerIndex min = std::numeric_limits::max(); { - // SOCI requires boost::optional (not std::optional) as the parameter. 
- boost::optional m; JLOG(journal_.trace()) - << "Begin: Look up lowest value of: " << minQuery; - { - auto db = database.checkoutDb(); - *db << minQuery, soci::into(m); - } - JLOG(journal_.trace()) << "End: Look up lowest value of: " << minQuery; + << "Begin: Look up lowest value of: " << TableName; + auto m = getMinSeq(); + JLOG(journal_.trace()) << "End: Look up lowest value of: " << TableName; if (!m) return; min = *m; @@ -624,27 +548,22 @@ SHAMapStoreImp::clearSql( if (min == lastRotated) { // Micro-optimization mainly to clarify logs - JLOG(journal_.trace()) << "Nothing to delete from " << deleteQuery; + JLOG(journal_.trace()) << "Nothing to delete from " << TableName; return; } - boost::format formattedDeleteQuery(deleteQuery); - - JLOG(journal_.debug()) << "start: " << deleteQuery << " from " << min - << " to " << lastRotated; + JLOG(journal_.debug()) << "start deleting in: " << TableName << " from " + << min << " to " << lastRotated; while (min < lastRotated) { min = std::min(lastRotated, min + deleteBatch_); - JLOG(journal_.trace()) << "Begin: Delete up to " << deleteBatch_ - << " rows with LedgerSeq < " << min - << " using query: " << deleteQuery; - { - auto db = database.checkoutDb(); - *db << boost::str(formattedDeleteQuery % min); - } + JLOG(journal_.trace()) + << "Begin: Delete up to " << deleteBatch_ + << " rows with LedgerSeq < " << min << " from: " << TableName; + deleteBeforeSeq(min); JLOG(journal_.trace()) << "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < " - << min << " using query: " << deleteQuery; + << min << " from: " << TableName; if (health()) return; if (min < lastRotated) @@ -652,7 +571,7 @@ SHAMapStoreImp::clearSql( if (health()) return; } - JLOG(journal_.debug()) << "finished: " << deleteQuery; + JLOG(journal_.debug()) << "finished deleting from: " << TableName; } void @@ -692,11 +611,19 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) if (health()) return; + RelationalDBInterfaceSqlite* iface = + dynamic_cast( + &app_.getRelationalDBInterface()); + clearSql( - *ledgerDb_, lastRotated, - "SELECT MIN(LedgerSeq) FROM Ledgers;", - "DELETE FROM Ledgers WHERE LedgerSeq < %u;"); + "Ledgers", + [&iface]() -> std::optional { + return iface->getMinLedgerSeq(); + }, + [&iface](LedgerIndex min) -> void { + iface->deleteBeforeLedgerSeq(min); + }); if (health()) return; @@ -704,18 +631,26 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) return; clearSql( - *transactionDb_, lastRotated, - "SELECT MIN(LedgerSeq) FROM Transactions;", - "DELETE FROM Transactions WHERE LedgerSeq < %u;"); + "Transactions", + [&iface]() -> std::optional { + return iface->getTransactionsMinLedgerSeq(); + }, + [&iface](LedgerIndex min) -> void { + iface->deleteTransactionsBeforeLedgerSeq(min); + }); if (health()) return; clearSql( - *transactionDb_, lastRotated, - "SELECT MIN(LedgerSeq) FROM AccountTransactions;", - "DELETE FROM AccountTransactions WHERE LedgerSeq < %u;"); + "AccountTransactions", + [&iface]() -> std::optional { + return iface->getAccountTransactionsMinLedgerSeq(); + }, + [&iface](LedgerIndex min) -> void { + iface->deleteAccountTransactionsBeforeLedgerSeq(min); + }); if (health()) return; } diff --git a/src/ripple/app/misc/SHAMapStoreImp.h b/src/ripple/app/misc/SHAMapStoreImp.h index 188788bb6b..ea39abd6c0 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.h +++ b/src/ripple/app/misc/SHAMapStoreImp.h @@ -22,6 +22,8 @@ #include #include +#include +#include #include #include #include @@ -37,19 +39,12 @@ class NetworkOPs; class SHAMapStoreImp : public 
Stoppable, public SHAMapStore { private: - struct SavedState - { - std::string writableDb; - std::string archiveDb; - LedgerIndex lastRotated; - }; - enum Health : std::uint8_t { ok = 0, stopping, unhealthy }; class SavedStateDB { public: - soci::session session_; + soci::session sqlDb_; std::mutex mutex_; beast::Journal const journal_; @@ -123,8 +118,6 @@ private: LedgerMaster* ledgerMaster_ = nullptr; FullBelowCache* fullBelowCache_ = nullptr; TreeNodeCache* treeNodeCache_ = nullptr; - DatabaseCon* transactionDb_ = nullptr; - DatabaseCon* ledgerDb_ = nullptr; static constexpr auto nodeStoreName_ = "NodeStore"; @@ -226,10 +219,10 @@ private: */ void clearSql( - DatabaseCon& database, LedgerIndex lastRotated, - std::string const& minQuery, - std::string const& deleteQuery); + const std::string TableName, + std::function()> const& getMinSeq, + std::function const& deleteBeforeSeq); void clearCaches(LedgerIndex validatedSeq); void diff --git a/src/ripple/app/misc/impl/AccountTxPaging.cpp b/src/ripple/app/misc/impl/AccountTxPaging.cpp index fd274c342a..5c1e801701 100644 --- a/src/ripple/app/misc/impl/AccountTxPaging.cpp +++ b/src/ripple/app/misc/impl/AccountTxPaging.cpp @@ -31,7 +31,7 @@ namespace ripple { void convertBlobsToTxResult( - NetworkOPs::AccountTxs& to, + RelationalDBInterface::AccountTxs& to, std::uint32_t ledger_index, std::string const& status, Blob const& rawTxn, @@ -60,212 +60,4 @@ saveLedgerAsync(Application& app, std::uint32_t seq) pendSaveValidated(app, l, false, false); } -void -accountTxPage( - DatabaseCon& connection, - AccountIDCache const& idCache, - std::function const& onUnsavedLedger, - std::function< - void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& - onTransaction, - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bAdmin, - std::uint32_t page_length) -{ - bool lookingForMarker = marker.has_value(); - - std::uint32_t numberOfResults; - - if (limit <= 0 || (limit > page_length && !bAdmin)) - numberOfResults = page_length; - else - numberOfResults = limit; - - // As an account can have many thousands of transactions, there is a limit - // placed on the amount of transactions returned. If the limit is reached - // before the result set has been exhausted (we always query for one more - // than the limit), then we return an opaque marker that can be supplied in - // a subsequent query. 
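The opaque-marker behaviour described in the removed comment above is preserved by the new interface: `RelationalDBInterface` (added later in this diff) defines `AccountTxMarker` and `AccountTxPageOptions`, and the paging queries themselves move into `RelationalDBInterface_nodes.*`. A hedged sketch of building the options for a resumed page; the helper function, ledger bounds, and limit are illustrative, and the `std::optional` template argument is reconstructed from context:

```cpp
// Sketch only: the types that replace the removed AccountTxMarker machinery.
#include <ripple/app/rdb/RelationalDBInterface.h>
#include <ripple/protocol/AccountID.h>
#include <optional>

using RDB = ripple::RelationalDBInterface;

RDB::AccountTxPageOptions
examplePageOptions(
    ripple::AccountID const& account,
    std::optional<RDB::AccountTxMarker> const& previousMarker)
{
    // An empty marker means "start from the first page"; a marker returned
    // by an earlier page identifies where the next page should resume.
    return {
        account,
        /*minLedger*/ 900,
        /*maxLedger*/ 1100,
        previousMarker,
        /*limit*/ 200,
        /*bAdmin*/ false};
}
```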
- std::uint32_t queryLimit = numberOfResults + 1; - std::uint32_t findLedger = 0, findSeq = 0; - - if (lookingForMarker) - { - findLedger = marker->ledgerSeq; - findSeq = marker->txnSeq; - } - - // marker is also an output parameter, so need to reset - marker.reset(); - - static std::string const prefix( - R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, - Status,RawTxn,TxnMeta - FROM AccountTransactions INNER JOIN Transactions - ON Transactions.TransID = AccountTransactions.TransID - AND AccountTransactions.Account = '%s' WHERE - )"); - - std::string sql; - - // SQL's BETWEEN uses a closed interval ([a,b]) - - if (forward && (findLedger == 0)) - { - sql = boost::str( - boost::format( - prefix + (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u' - ORDER BY AccountTransactions.LedgerSeq ASC, - AccountTransactions.TxnSeq ASC - LIMIT %u;)")) % - idCache.toBase58(account) % minLedger % maxLedger % queryLimit); - } - else if (forward && (findLedger != 0)) - { - auto b58acct = idCache.toBase58(account); - sql = boost::str( - boost::format(( - R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, - Status,RawTxn,TxnMeta - FROM AccountTransactions, Transactions WHERE - (AccountTransactions.TransID = Transactions.TransID AND - AccountTransactions.Account = '%s' AND - AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u') - OR - (AccountTransactions.TransID = Transactions.TransID AND - AccountTransactions.Account = '%s' AND - AccountTransactions.LedgerSeq = '%u' AND - AccountTransactions.TxnSeq >= '%u') - ORDER BY AccountTransactions.LedgerSeq ASC, - AccountTransactions.TxnSeq ASC - LIMIT %u; - )")) % - b58acct % (findLedger + 1) % maxLedger % b58acct % findLedger % - findSeq % queryLimit); - } - else if (!forward && (findLedger == 0)) - { - sql = boost::str( - boost::format( - prefix + (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u' - ORDER BY AccountTransactions.LedgerSeq DESC, - AccountTransactions.TxnSeq DESC - LIMIT %u;)")) % - idCache.toBase58(account) % minLedger % maxLedger % queryLimit); - } - else if (!forward && (findLedger != 0)) - { - auto b58acct = idCache.toBase58(account); - sql = boost::str( - boost::format(( - R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, - Status,RawTxn,TxnMeta - FROM AccountTransactions, Transactions WHERE - (AccountTransactions.TransID = Transactions.TransID AND - AccountTransactions.Account = '%s' AND - AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u') - OR - (AccountTransactions.TransID = Transactions.TransID AND - AccountTransactions.Account = '%s' AND - AccountTransactions.LedgerSeq = '%u' AND - AccountTransactions.TxnSeq <= '%u') - ORDER BY AccountTransactions.LedgerSeq DESC, - AccountTransactions.TxnSeq DESC - LIMIT %u; - )")) % - b58acct % minLedger % (findLedger - 1) % b58acct % findLedger % - findSeq % queryLimit); - } - else - { - assert(false); - // sql is empty - return; - } - - { - auto db(connection.checkoutDb()); - - Blob rawData; - Blob rawMeta; - - // SOCI requires boost::optional (not std::optional) as parameters. 
- boost::optional ledgerSeq; - boost::optional txnSeq; - boost::optional status; - soci::blob txnData(*db); - soci::blob txnMeta(*db); - soci::indicator dataPresent, metaPresent; - - soci::statement st = - (db->prepare << sql, - soci::into(ledgerSeq), - soci::into(txnSeq), - soci::into(status), - soci::into(txnData, dataPresent), - soci::into(txnMeta, metaPresent)); - - st.execute(); - - while (st.fetch()) - { - if (lookingForMarker) - { - if (findLedger == ledgerSeq.value_or(0) && - findSeq == txnSeq.value_or(0)) - { - lookingForMarker = false; - } - } - else if (numberOfResults == 0) - { - marker = { - rangeCheckedCast(ledgerSeq.value_or(0)), - txnSeq.value_or(0)}; - break; - } - - if (!lookingForMarker) - { - if (dataPresent == soci::i_ok) - convert(txnData, rawData); - else - rawData.clear(); - - if (metaPresent == soci::i_ok) - convert(txnMeta, rawMeta); - else - rawMeta.clear(); - - // Work around a bug that could leave the metadata missing - if (rawMeta.size() == 0) - onUnsavedLedger(ledgerSeq.value_or(0)); - - // `rawData` and `rawMeta` will be used after they are moved. - // That's OK. - onTransaction( - rangeCheckedCast(ledgerSeq.value_or(0)), - *status, - std::move(rawData), - std::move(rawMeta)); - // Note some callbacks will move the data, some will not. Clear - // them so code doesn't depend on if the data was actually moved - // or not. The code will be more efficient if `rawData` and - // `rawMeta` don't have to allocate in `convert`, so don't - // refactor my moving these variables into loop scope. - rawData.clear(); - rawMeta.clear(); - - --numberOfResults; - } - } - } - - return; -} } // namespace ripple diff --git a/src/ripple/app/misc/impl/AccountTxPaging.h b/src/ripple/app/misc/impl/AccountTxPaging.h index 5a6324cd53..ad3c40e56f 100644 --- a/src/ripple/app/misc/impl/AccountTxPaging.h +++ b/src/ripple/app/misc/impl/AccountTxPaging.h @@ -20,8 +20,7 @@ #ifndef RIPPLE_APP_MISC_IMPL_ACCOUNTTXPAGING_H_INCLUDED #define RIPPLE_APP_MISC_IMPL_ACCOUNTTXPAGING_H_INCLUDED -#include -#include +#include #include #include #include @@ -32,7 +31,7 @@ namespace ripple { void convertBlobsToTxResult( - NetworkOPs::AccountTxs& to, + RelationalDBInterface::AccountTxs& to, std::uint32_t ledger_index, std::string const& status, Blob const& rawTxn, @@ -42,22 +41,6 @@ convertBlobsToTxResult( void saveLedgerAsync(Application& app, std::uint32_t seq); -void -accountTxPage( - DatabaseCon& connection, - AccountIDCache const& idCache, - std::function const& onUnsavedLedger, - std::function< - void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& - onTransaction, - AccountID const& account, - std::int32_t minLedger, - std::int32_t maxLedger, - bool forward, - std::optional& marker, - int limit, - bool bAdmin, - std::uint32_t page_length); } // namespace ripple #endif diff --git a/src/ripple/app/misc/impl/AmendmentTable.cpp b/src/ripple/app/misc/impl/AmendmentTable.cpp index 633a8ece35..8cedabc836 100644 --- a/src/ripple/app/misc/impl/AmendmentTable.cpp +++ b/src/ripple/app/misc/impl/AmendmentTable.cpp @@ -19,8 +19,8 @@ #include #include +#include #include -#include #include #include #include @@ -328,25 +328,7 @@ AmendmentTableImpl::AmendmentTableImpl( // Find out if the FeatureVotes table exists in WalletDB bool const featureVotesExist = [this]() { auto db = db_.checkoutDb(); - soci::transaction tr(*db); - std::string sql = - "SELECT count(*) FROM sqlite_master " - "WHERE type='table' AND name='FeatureVotes'"; - // SOCI requires boost::optional (not std::optional) as the parameter. 
- boost::optional featureVotesCount; - *db << sql, soci::into(featureVotesCount); - bool exists = static_cast(*featureVotesCount); - - // Create FeatureVotes table in WalletDB if it doesn't exist - if (!exists) - { - *db << "CREATE TABLE FeatureVotes ( " - "AmendmentHash CHARACTER(64) NOT NULL, " - "AmendmentName TEXT, " - "Veto INTEGER NOT NULL );"; - tr.commit(); - } - return exists; + return createFeatureVotes(*db); }(); // Parse supported amendments @@ -408,51 +390,42 @@ AmendmentTableImpl::AmendmentTableImpl( // Read amendment votes from wallet.db auto db = db_.checkoutDb(); - soci::transaction tr(*db); - std::string sql = - "SELECT AmendmentHash, AmendmentName, Veto FROM FeatureVotes"; - // SOCI requires boost::optional (not std::optional) as parameters. - boost::optional amendment_hash; - boost::optional amendment_name; - boost::optional vote_to_veto; - soci::statement st = - (db->prepare << sql, - soci::into(amendment_hash), - soci::into(amendment_name), - soci::into(vote_to_veto)); - st.execute(); - while (st.fetch()) - { - uint256 amend_hash; - if (!amend_hash.parseHex(*amendment_hash)) - { - Throw( - "Invalid amendment ID '" + *amendment_hash + " in wallet.db"); - } - if (*vote_to_veto) - { - // Unknown amendments are effectively vetoed already - if (auto s = get(amend_hash, sl)) + readAmendments( + *db, + [&](boost::optional amendment_hash, + boost::optional amendment_name, + boost::optional vote_to_veto) { + uint256 amend_hash; + if (!amend_hash.parseHex(*amendment_hash)) { - JLOG(j_.info()) << "Amendment {" << *amendment_name << ", " - << amend_hash << "} is vetoed."; - if (!amendment_name->empty()) - s->name = *amendment_name; - s->vetoed = true; + Throw( + "Invalid amendment ID '" + *amendment_hash + + " in wallet.db"); } - } - else // un-veto - { - if (auto s = add(amend_hash, sl)) + if (*vote_to_veto) { - JLOG(j_.debug()) << "Amendment {" << *amendment_name << ", " - << amend_hash << "} is un-vetoed."; - if (!amendment_name->empty()) - s->name = *amendment_name; - s->vetoed = false; + // Unknown amendments are effectively vetoed already + if (auto s = get(amend_hash, sl)) + { + JLOG(j_.info()) << "Amendment {" << *amendment_name << ", " + << amend_hash << "} is vetoed."; + if (!amendment_name->empty()) + s->name = *amendment_name; + s->vetoed = true; + } } - } - } + else // un-veto + { + if (auto s = add(amend_hash, sl)) + { + JLOG(j_.debug()) << "Amendment {" << *amendment_name << ", " + << amend_hash << "} is un-vetoed."; + if (!amendment_name->empty()) + s->name = *amendment_name; + s->vetoed = false; + } + } + }); } AmendmentState* @@ -509,15 +482,7 @@ AmendmentTableImpl::persistVote( bool vote_to_veto) const { auto db = db_.checkoutDb(); - soci::transaction tr(*db); - std::string sql = - "INSERT INTO FeatureVotes (AmendmentHash, AmendmentName, Veto) VALUES " - "('"; - sql += to_string(amendment); - sql += "', '" + name; - sql += "', '" + std::to_string(int{vote_to_veto}) + "');"; - *db << sql; - tr.commit(); + voteAmendment(*db, amendment, name, vote_to_veto); } bool diff --git a/src/ripple/app/misc/impl/Manifest.cpp b/src/ripple/app/misc/impl/Manifest.cpp index e889fcbd3f..6073d41eb9 100644 --- a/src/ripple/app/misc/impl/Manifest.cpp +++ b/src/ripple/app/misc/impl/Manifest.cpp @@ -18,10 +18,12 @@ //============================================================================== #include +#include #include #include #include #include +#include #include #include #include @@ -476,31 +478,8 @@ ManifestCache::applyManifest(Manifest m) void ManifestCache::load(DatabaseCon& 
dbCon, std::string const& dbTable) { - // Load manifests stored in database - std::string const sql = "SELECT RawData FROM " + dbTable + ";"; auto db = dbCon.checkoutDb(); - soci::blob sociRawData(*db); - soci::statement st = (db->prepare << sql, soci::into(sociRawData)); - st.execute(); - while (st.fetch()) - { - std::string serialized; - convert(sociRawData, serialized); - if (auto mo = deserializeManifest(serialized)) - { - if (!mo->verify()) - { - JLOG(j_.warn()) << "Unverifiable manifest in db"; - continue; - } - - applyManifest(std::move(*mo)); - } - else - { - JLOG(j_.warn()) << "Malformed manifest in database"; - } - } + ripple::getManifests(*db, dbTable, *this, j_); } bool @@ -567,30 +546,8 @@ ManifestCache::save( std::function isTrusted) { std::lock_guard lock{apply_mutex_}; - auto db = dbCon.checkoutDb(); - soci::transaction tr(*db); - *db << "DELETE FROM " << dbTable; - std::string const sql = - "INSERT INTO " + dbTable + " (RawData) VALUES (:rawData);"; - for (auto const& v : map_) - { - // Save all revocation manifests, - // but only save trusted non-revocation manifests. - if (!v.second.revoked() && !isTrusted(v.second.masterKey)) - { - JLOG(j_.info()) << "Untrusted manifest in cache not saved to db"; - continue; - } - - // soci does not support bulk insertion of blob data - // Do not reuse blob because manifest ecdsa signatures vary in length - // but blob write length is expected to be >= the last write - soci::blob rawData(*db); - convert(v.second.serialized, rawData); - *db << sql, soci::use(rawData); - } - tr.commit(); + saveManifests(*db, dbTable, isTrusted, map_, j_); } } // namespace ripple diff --git a/src/ripple/app/misc/impl/Transaction.cpp b/src/ripple/app/misc/impl/Transaction.cpp index 007150d8d8..ee391c7a9e 100644 --- a/src/ripple/app/misc/impl/Transaction.cpp +++ b/src/ripple/app/misc/impl/Transaction.cpp @@ -21,6 +21,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -131,77 +134,9 @@ Transaction::load( Transaction::Locator Transaction::locate(uint256 const& id, Application& app) { -#ifdef RIPPLED_REPORTING - auto baseCmd = boost::format(R"(SELECT tx('%s');)"); - - std::string txHash = "\\x" + strHex(id); - std::string sql = boost::str(baseCmd % txHash); - - auto res = PgQuery(app.getPgPool())(sql.data()); - - if (!res) - { - JLOG(app.journal("Transaction").error()) - << __func__ - << " : Postgres response is null - tx ID = " << strHex(id); - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(app.journal("Transaction").error()) - << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - tx ID = " << strHex(id); - assert(false); - return {}; - } - - JLOG(app.journal("Transaction").trace()) - << __func__ << " Postgres result msg : " << res.msg(); - if (res.isNull() || res.ntuples() == 0) - { - JLOG(app.journal("Transaction").debug()) - << __func__ - << " : No data returned from Postgres : tx ID = " << strHex(id); - // This shouldn't happen - assert(false); - return {}; - } - - char const* resultStr = res.c_str(); - JLOG(app.journal("Transaction").debug()) - << "postgres result = " << resultStr; - - Json::Value v; - Json::Reader reader; - bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); - if (success) - { - if (v.isMember("nodestore_hash") && v.isMember("ledger_seq")) - { - uint256 nodestoreHash; - if (!nodestoreHash.parseHex( - v["nodestore_hash"].asString().substr(2))) 
- assert(false); - uint32_t ledgerSeq = v["ledger_seq"].asUInt(); - if (nodestoreHash.isNonZero()) - return {std::make_pair(nodestoreHash, ledgerSeq)}; - } - if (v.isMember("min_seq") && v.isMember("max_seq")) - { - return {ClosedInterval( - v["min_seq"].asUInt(), v["max_seq"].asUInt())}; - } - } -#endif - // Shouldn' happen. Postgres should return the ledger range searched if - // the transaction was not found - assert(false); - Throw( - "Transaction::Locate - Invalid Postgres response"); + return dynamic_cast( + &app.getRelationalDBInterface()) + ->locateTransaction(id); } std::variant< @@ -213,77 +148,9 @@ Transaction::load( std::optional> const& range, error_code_i& ec) { - std::string sql = - "SELECT LedgerSeq,Status,RawTxn,TxnMeta " - "FROM Transactions WHERE TransID='"; - - sql.append(to_string(id)); - sql.append("';"); - - // SOCI requires boost::optional (not std::optional) as parameters. - boost::optional ledgerSeq; - boost::optional status; - Blob rawTxn, rawMeta; - { - auto db = app.getTxnDB().checkoutDb(); - soci::blob sociRawTxnBlob(*db), sociRawMetaBlob(*db); - soci::indicator txn, meta; - - *db << sql, soci::into(ledgerSeq), soci::into(status), - soci::into(sociRawTxnBlob, txn), soci::into(sociRawMetaBlob, meta); - - auto const got_data = db->got_data(); - - if ((!got_data || txn != soci::i_ok || meta != soci::i_ok) && !range) - return TxSearched::unknown; - - if (!got_data) - { - uint64_t count = 0; - soci::indicator rti; - - *db << "SELECT COUNT(DISTINCT LedgerSeq) FROM Transactions WHERE " - "LedgerSeq BETWEEN " - << range->first() << " AND " << range->last() << ";", - soci::into(count, rti); - - if (!db->got_data() || rti != soci::i_ok) - return TxSearched::some; - - return count == (range->last() - range->first() + 1) - ? TxSearched::all - : TxSearched::some; - } - - convert(sociRawTxnBlob, rawTxn); - convert(sociRawMetaBlob, rawMeta); - } - - try - { - auto txn = - Transaction::transactionFromSQL(ledgerSeq, status, rawTxn, app); - - if (!ledgerSeq) - return std::pair{std::move(txn), nullptr}; - - std::uint32_t inLedger = - rangeCheckedCast(ledgerSeq.value()); - - auto txMeta = std::make_shared(id, inLedger, rawMeta); - - return std::pair{std::move(txn), std::move(txMeta)}; - } - catch (std::exception& e) - { - JLOG(app.journal("Ledger").warn()) - << "Unable to deserialize transaction from raw SQL value. Error: " - << e.what(); - - ec = rpcDB_DESERIALIZATION; - } - - return TxSearched::unknown; + return dynamic_cast( + &app.getRelationalDBInterface()) + ->getTransaction(id, range, ec); } // options 1 to include the date of the transaction diff --git a/src/ripple/app/rdb/RelationalDBInterface.h b/src/ripple/app/rdb/RelationalDBInterface.h new file mode 100644 index 0000000000..f6b29a4736 --- /dev/null +++ b/src/ripple/app/rdb/RelationalDBInterface.h @@ -0,0 +1,267 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACE_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +struct LedgerHashPair +{ + uint256 ledgerHash; + uint256 parentHash; +}; + +struct LedgerRange +{ + uint32_t min; + uint32_t max; +}; + +class RelationalDBInterface +{ +public: + struct CountMinMax + { + std::size_t numberOfRows; + LedgerIndex minLedgerSequence; + LedgerIndex maxLedgerSequence; + }; + + struct AccountTxMarker + { + std::uint32_t ledgerSeq = 0; + std::uint32_t txnSeq = 0; + }; + + struct AccountTxOptions + { + AccountID const& account; + std::uint32_t minLedger; + std::uint32_t maxLedger; + std::uint32_t offset; + std::uint32_t limit; + bool bUnlimited; + }; + + struct AccountTxPageOptions + { + AccountID const& account; + std::uint32_t minLedger; + std::uint32_t maxLedger; + std::optional marker; + std::uint32_t limit; + bool bAdmin; + }; + + using AccountTx = + std::pair, std::shared_ptr>; + using AccountTxs = std::vector; + using txnMetaLedgerType = std::tuple; + using MetaTxsList = std::vector; + + using LedgerSequence = uint32_t; + using LedgerHash = uint256; + using LedgerShortcut = RPC::LedgerShortcut; + using LedgerSpecifier = + std::variant; + + struct AccountTxArgs + { + AccountID account; + std::optional ledger; + bool binary = false; + bool forward = false; + uint32_t limit = 0; + std::optional marker; + }; + + struct AccountTxResult + { + std::variant transactions; + LedgerRange ledgerRange; + uint32_t limit; + std::optional marker; + }; + + /// Struct used to keep track of what to write to transactions and + /// account_transactions tables in Postgres + struct AccountTransactionsData + { + boost::container::flat_set accounts; + uint32_t ledgerSequence; + uint32_t transactionIndex; + uint256 txHash; + uint256 nodestoreHash; + + AccountTransactionsData( + TxMeta& meta, + uint256&& nodestoreHash, + beast::Journal& j) + : accounts(meta.getAffectedAccounts(j)) + , ledgerSequence(meta.getLgrSeq()) + , transactionIndex(meta.getIndex()) + , txHash(meta.getTxID()) + , nodestoreHash(std::move(nodestoreHash)) + { + } + }; + + /** + * @brief init Creates and returns appropriate interface based on config. + * @param app Application object. + * @param config Config object. + * @param jobQueue JobQueue object. + * @return Unique pointer to the interface. + */ + static std::unique_ptr + init(Application& app, Config const& config, JobQueue& jobQueue); + + virtual ~RelationalDBInterface() = default; + + /** + * @brief getMinLedgerSeq Returns minimum ledger sequence in Ledgers table. + * @return Ledger sequence or none if no ledgers exist. + */ + virtual std::optional + getMinLedgerSeq() = 0; + + /** + * @brief getMaxLedgerSeq Returns maximum ledger sequence in Ledgers table. + * @return Ledger sequence or none if no ledgers exist. + */ + virtual std::optional + getMaxLedgerSeq() = 0; + + /** + * @brief getLedgerInfoByIndex Returns ledger by its sequence. + * @param ledgerSeq Ledger sequence. + * @return Ledger or none if ledger not found. 
+ */ + virtual std::optional + getLedgerInfoByIndex(LedgerIndex ledgerSeq) = 0; + + /** + * @brief getNewestLedgerInfo Returns info of newest saved ledger. + * @return Ledger info or none if ledger not found. + */ + virtual std::optional + getNewestLedgerInfo() = 0; + + /** + * @brief getLedgerInfoByHash Returns info of ledger with given hash. + * @param ledgerHash Hash of the ledger. + * @return Ledger or none if ledger not found. + */ + virtual std::optional + getLedgerInfoByHash(uint256 const& ledgerHash) = 0; + + /** + * @brief getHashByIndex Returns hash of ledger with given sequence. + * @param ledgerIndex Ledger sequence. + * @return Hash of the ledger. + */ + virtual uint256 + getHashByIndex(LedgerIndex ledgerIndex) = 0; + + /** + * @brief getHashesByIndex Returns hash of the ledger and hash of parent + * ledger for the ledger of given sequence. + * @param ledgerIndex Ledger sequence. + * @return Struct LedgerHashPair which contain hashes of the ledger and + * its parent ledger. + */ + virtual std::optional + getHashesByIndex(LedgerIndex ledgerIndex) = 0; + + /** + * @brief getHashesByIndex Returns hash of the ledger and hash of parent + * ledger for all ledgers with sequences from given minimum limit + * to given maximum limit. + * @param minSeq Minimum ledger sequence. + * @param maxSeq Maximum ledger sequence. + * @return Map which points sequence number of found ledger to the struct + * LedgerHashPair which contains ledger hash and its parent hash. + */ + virtual std::map + getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) = 0; + + /** + * @brief getTxHistory Returns most recent 20 transactions starting from + * given number or entry. + * @param startIndex First number of returned entry. + * @return Vector of sharded pointers to transactions sorted in + * descending order by ledger sequence. + */ + virtual std::vector> + getTxHistory(LedgerIndex startIndex) = 0; + + /** + * @brief ledgerDbHasSpace Checks if ledger database has available space. + * @param config Config object. + * @return True if space is available. + */ + virtual bool + ledgerDbHasSpace(Config const& config) = 0; + + /** + * @brief transactionDbHasSpace Checks if transaction database has + * available space. + * @param config Config object. + * @return True if space is available. + */ + virtual bool + transactionDbHasSpace(Config const& config) = 0; +}; + +template +T +rangeCheckedCast(C c) +{ + if ((c > std::numeric_limits::max()) || + (!std::numeric_limits::is_signed && c < 0) || + (std::numeric_limits::is_signed && + std::numeric_limits::is_signed && + c < std::numeric_limits::lowest())) + { + /* This should never happen */ + assert(0); + JLOG(debugLog().error()) + << "rangeCheckedCast domain error:" + << " value = " << c << " min = " << std::numeric_limits::lowest() + << " max: " << std::numeric_limits::max(); + } + + return static_cast(c); +} + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface.md b/src/ripple/app/rdb/RelationalDBInterface.md new file mode 100644 index 0000000000..302c9befeb --- /dev/null +++ b/src/ripple/app/rdb/RelationalDBInterface.md @@ -0,0 +1,288 @@ +# Relational Database Interface + +Here are main principles of Relational DB interface: + +1) All SQL hard code is in the files described below in Files section. +No hard-coded SQL should be added to any other file in rippled, except related +to tests for specific SQL implementations. 
+2) The pure interface class `RelationalDBInterface` can have several
+implementations for different relational database types.
+3) For future use: if the node database is absent, the shard databases are
+used instead.
+
+## Configuration
+
+The `[relational_db]` section of the configuration file contains the parameter
+`backend`. Its value is the name of the relational database implementation
+used for the node or shard databases. At present the only valid value of this
+parameter is `sqlite`.
+
+## Files
+
+The following source files make up the Relational DB interface:
+
+- `ripple/app/rdb/RelationalDBInterface.h` - definition of the main pure
+interface class, `RelationalDBInterface`;
+- `ripple/app/rdb/impl/RelationalDBInterface.cpp` - implementation of the
+static method `init()` of the class `RelationalDBInterface`;
+- `ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h` - definition of the
+pure class `RelationalDBInterfaceSqlite`, derived from
+`RelationalDBInterface`; this is the base class of the sqlite implementation
+of the interface;
+- `ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp` - implementation of
+the `RelationalDBInterfaceSqlite`-derived class for sqlite databases;
+- `ripple/app/rdb/backend/RelationalDBInterfacePostgres.h` - definition of the
+pure class `RelationalDBInterfacePostgres`, derived from
+`RelationalDBInterface`; this is the base class of the postgres implementation
+of the interface;
+- `ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp` - implementation
+of the `RelationalDBInterfacePostgres`-derived class for postgres databases;
+- `ripple/app/rdb/RelationalDBInterface_global.h` - declarations of global
+methods for all sqlite databases except the node and shard databases;
+- `ripple/app/rdb/impl/RelationalDBInterface_global.cpp` - implementations of
+global methods for all sqlite databases except the node and shard databases;
+- `ripple/app/rdb/RelationalDBInterface_nodes.h` - declarations of global
+methods for the sqlite node databases;
+- `ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp` - implementations of
+global methods for the sqlite node databases;
+- `ripple/app/rdb/RelationalDBInterface_shards.h` - declarations of global
+methods for the sqlite shard databases;
+- `ripple/app/rdb/impl/RelationalDBInterface_shards.cpp` - implementations of
+global methods for the sqlite shard databases;
+- `ripple/app/rdb/RelationalDBInterface_postgres.h` - declarations of internal
+methods for postgres databases;
+- `ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp` - implementations of
+internal methods for postgres databases.
+
+## Classes
+
+The main class of the interface is `class RelationalDBInterface`, defined in
+the file `RelationalDBInterface.h`. Its static method `init()` creates the
+`RelationalDBInterface`-derived class specified in the config. All other
+methods are pure virtual and do not take a database as a parameter: an
+implementation derived from `RelationalDBInterface` is assumed to hold all
+database pointers internally and to use the appropriate databases (node or
+shard) to produce the values each method returns.
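As an aside, the sketch below (not part of the change set; the helper function is illustrative and return types are reconstructed from how `SHAMapStoreImp::clearPrior` uses them) shows the resulting calling pattern: backend-independent queries go through the base interface that `init()` selected, while backend-specific queries downcast to the concrete implementation:

```cpp
// Sketch only. With "backend=sqlite" in [relational_db], init() builds the
// sqlite implementation, which the Application then owns.
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDBInterface.h>
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
#include <optional>

std::optional<ripple::LedgerIndex>
exampleMinTxLedgerSeq(ripple::Application& app)
{
    // Backend-independent call on the base interface.
    ripple::RelationalDBInterface& rdb = app.getRelationalDBInterface();
    auto const minLedgerSeq = rdb.getMinLedgerSeq();
    (void)minLedgerSeq;

    // Backend-specific call: downcast to the sqlite implementation, as
    // SHAMapStoreImp::clearPrior does earlier in this diff.
    if (auto* sqlite = dynamic_cast<ripple::RelationalDBInterfaceSqlite*>(&rdb))
        return sqlite->getTransactionsMinLedgerSeq();

    return std::nullopt;
}
```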
+
+At present there are two implementations of the derived classes:
+`class RelationalDBInterfaceSqlite` for sqlite databases (located in the file
+`RelationalDBInterfaceSqlite.cpp`) and `class RelationalDBInterfacePostgres`
+for postgres databases (located in the file
+`RelationalDBInterfacePostgres.cpp`).
+
+## Methods
+
+The SQL interface provides 3 types of methods:
+
+1) Global methods that work with all databases except the node database, in
+particular methods related only to the shard databases. These methods are
+sqlite-specific. They take a `soci::session` as the database parameter. They
+are declared and implemented in the files `RelationalDBInterface_global.*` and
+`RelationalDBInterface_shards.*`.
+
+2) Global methods that work with the node databases and also with the shard
+databases. In the sqlite case these methods are internal to the
+`RelationalDBInterfaceSqlite` implementation of the class
+`RelationalDBInterface`; they take a `soci::session` as the database parameter
+and are declared and implemented in the files
+`RelationalDBInterface_nodes.*`. In the postgres case these methods are
+internal to the `RelationalDBInterfacePostgres` implementation of the class
+`RelationalDBInterface`; they take a `std::shared_ptr` as the database
+parameter and are declared and implemented in the files
+`RelationalDBInterface_postgres.*`.
+
+3) Virtual methods of the class `RelationalDBInterface` and of the derived
+classes `RelationalDBInterfaceSqlite` and `RelationalDBInterfacePostgres`.
+Calling such a method results in a call to the corresponding method of the
+`RelationalDBInterface`-derived class. In the sqlite case, such a method first
+tries to retrieve the information from the node database and, if that database
+does not exist, falls back to the shard databases; for both the node and shard
+databases it calls the global methods of type 2). In the postgres case, such a
+method retrieves the information from the node database only, by calling a
+global method of type 2).
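To make the layering concrete, here is a small sketch (not part of the change set; the helper function and its arguments are assumptions) of the same minimum-ledger lookup expressed as a type 2 call and as a type 3 call:

```cpp
// Sketch only: the same lookup at the two layers described above.
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDBInterface.h>
#include <ripple/app/rdb/RelationalDBInterface_nodes.h>

void
exampleLayers(ripple::Application& app, soci::session& ledgerSession)
{
    // Type 2: sqlite-specific global method with an explicit session and table.
    auto const minFromTable =
        ripple::getMinLedgerSeq(ledgerSession, ripple::TableType::Ledgers);

    // Type 3: virtual method on the configured implementation; the backend
    // decides which database(s) to consult.
    auto const minFromInterface =
        app.getRelationalDBInterface().getMinLedgerSeq();

    (void)minFromTable;
    (void)minFromInterface;
}
```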
+ +## Methods lists + +### Type 1 methods + +#### Files RelationalDBInterface_global.* + +Wallet DB methods: +``` +makeWalletDB +makeTestWalletDB +getManifests +saveManifests +addValidatorManifest +getNodeIdentity +getPeerReservationTable +insertPeerReservation +deletePeerReservation +createFeatureVotes +readAmendments +voteAmendment +``` + +State DB methods: +``` +initStateDB +getCanDelete +setCanDelete +getSavedState +setSavedState +setLastRotated +``` + +DatabaseBody DB methods: +``` +openDatabaseBodyDb +databaseBodyDoPut +databaseBodyFinish +``` + +Vacuum DB method: +``` +doVacuumDB +``` + +PeerFinder DB methods: +``` +initPeerFinderDB +updatePeerFinderDB +readPeerFinderDB +savePeerFinderDB +``` + +#### Files RelationalDBInterface_shards.* + +Shards DB methods: +``` +makeShardCompleteLedgerDBs +makeShardIncompleteLedgerDBs +updateLedgerDBs +``` + +Shard acquire DB methods: +``` +makeAcquireDB +insertAcquireDBIndex +selectAcquireDBLedgerSeqs +selectAcquireDBLedgerSeqsHash +updateAcquireDB +``` + +Shard archive DB methods: +``` +makeArchiveDB +readArchiveDB +insertArchiveDB +deleteFromArchiveDB +dropArchiveDB +``` + +### Type 2 methods + +#### Files RelationalDBInterface_nodes.* + +``` +makeLedgerDBs +getMinLedgerSeq +getMaxLedgerSeq +deleteByLedgerSeq +deleteBeforeLedgerSeq +getRows +getRowsMinMax +saveValidatedLedger +getLedgerInfoByIndex +getOldestLedgerInfo +getNewestLedgerInfo +getLimitedOldestLedgerInfo +getLimitedNewestLedgerInfo +getLedgerInfoByHash +getHashByIndex +getHashesByIndex +getHashesByIndex +getTxHistory +getOldestAccountTxs +getNewestAccountTxs +getOldestAccountTxsB +getNewestAccountTxsB +oldestAccountTxPage +newestAccountTxPage +getTransaction +DbHasSpace +``` + +#### Files RelationalDBInterface_postgres.* + +``` +getMinLedgerSeq +getMaxLedgerSeq +getCompleteLedgers +getValidatedLedgerAge +getNewestLedgerInfo +getLedgerInfoByIndex +getLedgerInfoByHash +getHashByIndex +getHashesByIndex +getTxHashes +getAccountTx +locateTransaction +writeLedgerAndTransactions +getTxHistory +``` + +### Type 3 methods + +#### Files RelationalDBInterface.* + +``` +init +getMinLedgerSeq +getMaxLedgerSeq +getLedgerInfoByIndex +getNewestLedgerInfo +getLedgerInfoByHash +getHashByIndex +getHashesByIndex +getTxHistory +ledgerDbHasSpace +transactionDbHasSpace +``` + +#### Files backend/RelationalDBInterfaceSqlite.* + +``` +getTransactionsMinLedgerSeq +getAccountTransactionsMinLedgerSeq +deleteTransactionByLedgerSeq +deleteBeforeLedgerSeq +deleteTransactionsBeforeLedgerSeq +deleteAccountTransactionsBeforeLedgerSeq +getTransactionCount +getAccountTransactionCount +getLedgerCountMinMax +saveValidatedLedger +getLimitedOldestLedgerInfo +getLimitedNewestLedgerInfo +getOldestAccountTxs +getNewestAccountTxs +getOldestAccountTxsB +getNewestAccountTxsB +oldestAccountTxPage +newestAccountTxPage +oldestAccountTxPageB +newestAccountTxPageB +getTransaction +getKBUsedAll +getKBUsedLedger +getKBUsedTransaction +``` + +#### Files backend/RelationalDBInterfacePostgres.* + +``` +sweep +getCompleteLedgers +getValidatedLedgerAge +writeLedgerAndTransactions +getTxHashes +getAccountTx +locateTransaction +``` diff --git a/src/ripple/app/rdb/RelationalDBInterface_global.h b/src/ripple/app/rdb/RelationalDBInterface_global.h new file mode 100644 index 0000000000..8f62a47a01 --- /dev/null +++ b/src/ripple/app/rdb/RelationalDBInterface_global.h @@ -0,0 +1,329 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright 
(c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_GLOBAL_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACE_GLOBAL_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +/* Wallet DB */ + +/** + * @brief makeWalletDB Opens wallet DB and returns it. + * @param setup Path to database and other opening parameters. + * @return Unique pointer to database descriptor. + */ +std::unique_ptr +makeWalletDB(DatabaseCon::Setup const& setup); + +/** + * @brief makeTestWalletDB Opens test wallet DB with arbitrary name. + * @param setup Path to database and other opening parameters. + * @param dbname Name of database. + * @return Unique pointer to database descriptor. + */ +std::unique_ptr +makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname); + +/** + * @brief getManifests Loads manifest from wallet DB and stores it in the cache. + * @param session Session with database. + * @param dbTable Name of table in the database to extract manifest from. + * @param mCache Cache to store manifest. + * @param j Journal. + */ +void +getManifests( + soci::session& session, + std::string const& dbTable, + ManifestCache& mCache, + beast::Journal j); + +/** + * @brief saveManifests Saves all given manifests to database. + * @param session Session with database. + * @param dbTable Name of database table to save manifest into. + * @param isTrusted Callback returned true if key is trusted. + * @param map Map to save which points public keys to manifests. + * @param j Journal. + */ +void +saveManifests( + soci::session& session, + std::string const& dbTable, + std::function isTrusted, + hash_map const& map, + beast::Journal j); + +/** + * @brief addValidatorManifest Saves manifest of validator to database. + * @param session Session with database. + * @param serialized Manifest of validator in raw format. + */ +void +addValidatorManifest(soci::session& session, std::string const& serialized); + +/** + * @brief getNodeIdentity Returns public and private keys of this node. + * @param session Session with database. + * @return Pair of public and private keys. + */ +std::pair +getNodeIdentity(soci::session& session); + +/** + * @brief getPeerReservationTable Returns peer reservation table. + * @param session Session with database. + * @param j Journal. + * @return Peer reservation hash table. + */ +std::unordered_set, KeyEqual> +getPeerReservationTable(soci::session& session, beast::Journal j); + +/** + * @brief insertPeerReservation Adds entry to peer reservation table. + * @param session Session with database. + * @param nodeId public key of node. + * @param description Description of node. 
+ */ +void +insertPeerReservation( + soci::session& session, + PublicKey const& nodeId, + std::string const& description); + +/** + * @brief deletePeerReservation Deletes entry from peer reservation table. + * @param session Session with database. + * @param nodeId Public key of node to remove. + */ +void +deletePeerReservation(soci::session& session, PublicKey const& nodeId); + +/** + * @brief createFeatureVotes Creates FeatureVote table if it is not exists. + * @param session Session with walletDB database. + * @return true if the table already exists + */ +bool +createFeatureVotes(soci::session& session); + +/** + * @brief readAmendments Read all amendments from FeatureVotes table. + * @param session Session with walletDB database. + * @param callback Callback called for each amendment passing its hash, name + * and teh flag if it should be vetoed as callback parameters + */ +void +readAmendments( + soci::session& session, + std::function amendment_hash, + boost::optional amendment_name, + boost::optional vote_to_veto)> const& callback); + +/** + * @brief voteAmendment Set veto value for particular amendment. + * @param session Session with walletDB database. + * @param amendment Hash of amendment. + * @param name Name of amendment. + * @param vote_to_veto Trus if this amendment should be vetoed. + */ +void +voteAmendment( + soci::session& session, + uint256 const& amendment, + std::string const& name, + bool vote_to_veto); + +/* State DB */ + +struct SavedState +{ + std::string writableDb; + std::string archiveDb; + LedgerIndex lastRotated; +}; + +/** + * @brief initStateDB Opens DB session with State DB. + * @param session Structure to open session in. + * @param config Path to database and other opening parameters. + * @param dbName Name of database. + */ +void +initStateDB( + soci::session& session, + BasicConfig const& config, + std::string const& dbName); + +/** + * @brief getCanDelete Returns ledger sequence which can be deleted. + * @param session Session with database. + * @return Ledger sequence. + */ +LedgerIndex +getCanDelete(soci::session& session); + +/** + * @brief setCanDelete Updates ledger sequence which can be deleted. + * @param session Session with database. + * @param canDelete Ledger sequence to save. + * @return Previous value of ledger sequence whic can be deleted. + */ +LedgerIndex +setCanDelete(soci::session& session, LedgerIndex canDelete); + +/** + * @brief getSavedState Returns saved state. + * @param session Session with database. + * @return The SavedState structure which contains names of + * writable DB, archive DB and last rotated ledger sequence. + */ +SavedState +getSavedState(soci::session& session); + +/** + * @brief setSavedState Saves given state. + * @param session Session with database. + * @param state The SavedState structure which contains names of + * writable DB, archive DB and last rotated ledger sequence. + */ +void +setSavedState(soci::session& session, SavedState const& state); + +/** + * @brief setLastRotated Updates last rotated ledger sequence. + * @param session Session with database. + * @param seq New value of last rotated ledger sequence. + */ +void +setLastRotated(soci::session& session, LedgerIndex seq); + +/* DatabaseBody DB */ + +/** + * @brief openDatabaseBodyDb Opens file download DB and returns its descriptor. + * Start new download process or continue existing one. + * @param setup Path to database and other opening parameters. + * @param path Path of new file to download. 
+ * @return Pair of unique pointer to database and current downloaded size + * if download process continues. + */ +std::pair, std::optional> +openDatabaseBodyDb( + DatabaseCon::Setup const& setup, + boost::filesystem::path path); + +/** + * @brief databaseBodyDoPut Saves new fragment of downloaded file. + * @param session Session with database. + * @param data Downloaded piece to file data tp save. + * @param path Path of downloading file. + * @param fileSize Size of downloaded piece of file. + * @param part Sequence number of downloaded file part. + * @param maxRowSizePad Maximum size of file part to save. + * @return Number of saved parts. Downloaded piece may be splitted + * into several parts of size not large that maxRowSizePad. + */ +std::uint64_t +databaseBodyDoPut( + soci::session& session, + std::string const& data, + std::string const& path, + std::uint64_t fileSize, + std::uint64_t part, + std::uint16_t maxRowSizePad); + +/** + * @brief databaseBodyFinish Finishes download process and writes file to disk. + * @param session Session with database. + * @param fout Opened file to write downloaded data from database. + */ +void +databaseBodyFinish(soci::session& session, std::ofstream& fout); + +/* Vacuum DB */ + +/** + * @brief doVacuumDB Creates, initialises DB, and performs its cleanup. + * @param setup Path to database and other opening parameters. + * @return True if vacuum process completed successfully. + */ +bool +doVacuumDB(DatabaseCon::Setup const& setup); + +/* PeerFinder DB */ + +/** + * @brief initPeerFinderDB Opens session with peer finder database. + * @param session Structure to open session in. + * @param config Path to database and other opening parameters. + * @param j Journal. + */ +void +initPeerFinderDB( + soci::session& session, + BasicConfig const& config, + beast::Journal j); + +/** + * @brief updatePeerFinderDB Update peer finder DB to new version. + * @param session Session with database. + * @param currentSchemaVersion New version of database. + * @param j Journal. + */ +void +updatePeerFinderDB( + soci::session& session, + int currentSchemaVersion, + beast::Journal j); + +/** + * @brief readPeerFinderDB Read all entries from peer finder DB and call + * given callback for each entry. + * @param session Session with database. + * @param func Callback to call for each entry. + */ +void +readPeerFinderDB( + soci::session& session, + std::function const& func); + +/** + * @brief savePeerFinderDB Save new entry to peer finder DB. + * @param session Session with database. + * @param v Entry to save which contains information about new peer. + */ +void +savePeerFinderDB( + soci::session& session, + std::vector const& v); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface_nodes.h b/src/ripple/app/rdb/RelationalDBInterface_nodes.h new file mode 100644 index 0000000000..3bf50a0428 --- /dev/null +++ b/src/ripple/app/rdb/RelationalDBInterface_nodes.h @@ -0,0 +1,472 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_NODES_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACE_NODES_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +/* Need to change TableTypeCount if TableType is modified. */ +enum class TableType { Ledgers, Transactions, AccountTransactions }; +constexpr int TableTypeCount = 3; + +struct DatabasePairValid +{ + std::unique_ptr ledgerDb; + std::unique_ptr transactionDb; + bool valid; +}; + +/** + * @brief makeLedgerDBs Opens ledger and transactions databases. + * @param config Config object. + * @param setup Path to database and opening parameters. + * @param checkpointerSetup Database checkpointer setup. + * @return Struct DatabasePairValid which contain unique pointers to ledger + * and transaction databases and flag if opening was successfull. + */ +DatabasePairValid +makeLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup); + +/** + * @brief getMinLedgerSeq Returns minimum ledger sequence in given table. + * @param session Session with database. + * @param type Table ID for which the result is returned. + * @return Ledger sequence or none if no ledgers exist. + */ +std::optional +getMinLedgerSeq(soci::session& session, TableType type); + +/** + * @brief getMaxLedgerSeq Returns maximum ledger sequence in given table. + * @param session Session with database. + * @param type Table ID for which the result is returned. + * @return Ledger sequence or none if no ledgers exist. + */ +std::optional +getMaxLedgerSeq(soci::session& session, TableType type); + +/** + * @brief deleteByLedgerSeq Deletes all entries in given table + * for the ledger with given sequence. + * @param session Session with database. + * @param type Table ID from which entries will be deleted. + * @param ledgerSeq Ledger sequence. + */ +void +deleteByLedgerSeq( + soci::session& session, + TableType type, + LedgerIndex ledgerSeq); + +/** + * @brief deleteBeforeLedgerSeq Deletes all entries in given table + * for the ledgers with given sequence and all sequences below it. + * @param session Session with database. + * @param type Table ID from which entries will be deleted. + * @param ledgerSeq Ledger sequence. + */ +void +deleteBeforeLedgerSeq( + soci::session& session, + TableType type, + LedgerIndex ledgerSeq); + +/** + * @brief getRows Returns number of rows in given table. + * @param session Session with database. + * @param type Table ID for which the result is returned. + * @return Number of rows. + */ +std::size_t +getRows(soci::session& session, TableType type); + +/** + * @brief getRowsMinMax Returns minumum ledger sequence, + * maximum ledger sequence and total number of rows in given table. + * @param session Session with database. + * @param type Table ID for which the result is returned. 
+ * @return Struct CountMinMax which contain minimum sequence, + * maximum sequence and number of rows. + */ +RelationalDBInterface::CountMinMax +getRowsMinMax(soci::session& session, TableType type); + +/** + * @brief saveValidatedLedger Saves ledger into database. + * @param lgrDB Link to ledgers database. + * @param txnDB Link to transactions database. + * @param app Application object. + * @param ledger The ledger. + * @param current True if ledger is current. + * @return True is saving was successfull. + */ +bool +saveValidatedLedger( + DatabaseCon& ldgDB, + DatabaseCon& txnDB, + Application& app, + std::shared_ptr const& ledger, + bool current); + +/** + * @brief getLedgerInfoByIndex Returns ledger by its sequence. + * @param session Session with database. + * @param ledgerSeq Ledger sequence. + * @param j Journal. + * @return Ledger or none if ledger not found. + */ +std::optional +getLedgerInfoByIndex( + soci::session& session, + LedgerIndex ledgerSeq, + beast::Journal j); + +/** + * @brief getNewestLedgerInfo Returns info of newest saved ledger. + * @param session Session with database. + * @param j Journal. + * @return Ledger info or none if ledger not found. + */ +std::optional +getNewestLedgerInfo(soci::session& session, beast::Journal j); + +/** + * @brief getLimitedOldestLedgerInfo Returns info of oldest ledger + * from ledgers with sequences greather or equal to given. + * @param session Session with database. + * @param ledgerFirstIndex Minimum ledger sequence. + * @param j Journal. + * @return Ledger info or none if ledger not found. + */ +std::optional +getLimitedOldestLedgerInfo( + soci::session& session, + LedgerIndex ledgerFirstIndex, + beast::Journal j); + +/** + * @brief getLimitedNewestLedgerInfo Returns info of newest ledger + * from ledgers with sequences greather or equal to given. + * @param session Session with database. + * @param ledgerFirstIndex Minimum ledger sequence. + * @param j Journal. + * @return Ledger info or none if ledger not found. + */ +std::optional +getLimitedNewestLedgerInfo( + soci::session& session, + LedgerIndex ledgerFirstIndex, + beast::Journal j); + +/** + * @brief getLedgerInfoByHash Returns info of ledger with given hash. + * @param session Session with database. + * @param ledgerHash Hash of the ledger. + * @param j Journal. + * @return Ledger or none if ledger not found. + */ +std::optional +getLedgerInfoByHash( + soci::session& session, + uint256 const& ledgerHash, + beast::Journal j); + +/** + * @brief getHashByIndex Returns hash of ledger with given sequence. + * @param session Session with database. + * @param ledgerIndex Ledger sequence. + * @return Hash of the ledger. + */ +uint256 +getHashByIndex(soci::session& session, LedgerIndex ledgerIndex); + +/** + * @brief getHashesByIndex Returns hash of the ledger and hash of parent + * ledger for the ledger of given sequence. + * @param session Session with database. + * @param ledgerIndex Ledger sequence. + * @param j Journal. + * @return Struct LedgerHashPair which contain hashes of the ledger and + * its parent ledger. + */ +std::optional +getHashesByIndex( + soci::session& session, + LedgerIndex ledgerIndex, + beast::Journal j); + +/** + * @brief getHashesByIndex Returns hash of the ledger and hash of parent + * ledger for all ledgers with seqyences from given minimum limit + * to fiven maximum limit. + * @param session Session with database. + * @param minSeq Minimum ledger sequence. + * @param maxSeq Maximum ledger sequence. + * @param j Journal. 
+ * @return Map which points sequence number of found ledger to the struct + * LedgerHashPair which contauns ledger hash and its parent hash. + */ +std::map +getHashesByIndex( + soci::session& session, + LedgerIndex minSeq, + LedgerIndex maxSeq, + beast::Journal j); + +/** + * @brief getTxHistory Returns given number of most recent transactions + * starting from given number of entry. + * @param session Session with database. + * @param app Application object. + * @param startIndex Offset of first returned entry. + * @param quantity Number of returned entries. + * @param count True if counting of all transaction in that shard required. + * @return Vector of shared pointers to transactions sorted in + * descending order by ledger sequence. Also number of transactions + * if count == true. + */ +std::pair>, int> +getTxHistory( + soci::session& session, + Application& app, + LedgerIndex startIndex, + int quantity, + bool count); + +/** + * @brief getOldestAccountTxs Returns oldest transactions for given + * account which match given criteria starting from given offset. + * @param session Session with database. + * @param app Application object. + * @param ledgerMaster LedgerMaster object. + * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param j Journal. + * @return Vector of pairs of found transactions and their metadata + * sorted in ascending order by account sequence. + * Also number of transactions processed. + */ +std::pair +getOldestAccountTxs( + soci::session& session, + Application& app, + LedgerMaster& ledgerMaster, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j); + +/** + * @brief getNewestAccountTxs Returns newest transactions for given + * account which match given criteria starting from given offset. + * @param session Session with database. + * @param app Application object. + * @param ledgerMaster LedgerMaster object. + * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param j Journal. + * @return Vector of pairs of found transactions and their metadata + * sorted in descending order by account sequence. + * Also number of transactions processed. + */ +std::pair +getNewestAccountTxs( + soci::session& session, + Application& app, + LedgerMaster& ledgerMaster, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j); + +/** + * @brief getOldestAccountTxsB Returns oldest transactions in binary form + * for given account which match given criteria starting from given + * offset. + * @param session Session with database. + * @param app Application object. 
+ * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param j Journal. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in ascending order by account + * sequence. Also number of transactions processed. + */ +std::pair, int> +getOldestAccountTxsB( + soci::session& session, + Application& app, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j); + +/** + * @brief getNewestAccountTxsB Returns newest transactions in binary form + * for given account which match given criteria starting from given + * offset. + * @param session Session with database. + * @param app Application object. + * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param j Journal. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in descending order by account + * sequence. Also number of transactions processed. + */ +std::pair, int> +getNewestAccountTxsB( + soci::session& session, + Application& app, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j); + +/** + * @brief oldestAccountTxPage Searches oldest transactions for given + * account which match given criteria starting from given marker + * and calls callback for each found transaction. + * @param session Session with database. + * @param idCache Account ID cache. + * @param onUnsavedLedger Callback function to call on each found unsaved + * ledger within given range. + * @param onTransaction Callback function to call on each found transaction. + * @param options Struct AccountTxPageOptions which contain criteria to + * match: the account, minimum and maximum ledger numbers to search, + * marker of first returned entry, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases. + * @param page_length Total number of transactions to return. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in ascending order by account + * sequence and marker for next search if search not finished. + * Also number of transactions processed during this call. 
+ */ +std::pair, int> +oldestAccountTxPage( + soci::session& session, + AccountIDCache const& idCache, + std::function const& onUnsavedLedger, + std::function< + void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& + onTransaction, + RelationalDBInterface::AccountTxPageOptions const& options, + int limit_used, + std::uint32_t page_length); + +/** + * @brief newestAccountTxPage Searches newest transactions for given + * account which match given criteria starting from given marker + * and calls callback for each found transaction. + * @param session Session with database. + * @param idCache Account ID cache. + * @param onUnsavedLedger Callback function to call on each found unsaved + * ledger within given range. + * @param onTransaction Callback function to call on each found transaction. + * @param options Struct AccountTxPageOptions which contain criteria to + * match: the account, minimum and maximum ledger numbers to search, + * marker of first returned entry, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases. + * @param page_length Total number of transactions to return. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in descending order by account + * sequence and marker for next search if search not finished. + * Also number of transactions processed during this call. + */ +std::pair, int> +newestAccountTxPage( + soci::session& session, + AccountIDCache const& idCache, + std::function const& onUnsavedLedger, + std::function< + void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& + onTransaction, + RelationalDBInterface::AccountTxPageOptions const& options, + int limit_used, + std::uint32_t page_length); + +/** + * @brief getTransaction Returns transaction with given hash. If not found + * and range given then check if all ledgers from the range are + * present in the database. + * @param session Session with database. + * @param app Application object. + * @param id Hash of the transaction. + * @param range Range of ledgers to check, if present. + * @param ec Default value of error code. + * @return Transaction and its metadata if found, TxSearched::all if range + * given and all ledgers from range are present in the database, + * TxSearched::some if range given and not all ledgers are present, + * TxSearched::unknown if range not given or deserializing error + * occured. In the last case error code modified in ec link + * parameter, in other cases default error code remained. + */ +std::variant +getTransaction( + soci::session& session, + Application& app, + uint256 const& id, + std::optional> const& range, + error_code_i& ec); + +/** + * @brief dbHasSpace Checks if given database has available space. + * @param session Session with database. + * @param config Config object. + * @param j Journal. + * @return True if space is available. 
+ */ +bool +dbHasSpace(soci::session& session, Config const& config, beast::Journal j); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface_postgres.h b/src/ripple/app/rdb/RelationalDBInterface_postgres.h new file mode 100644 index 0000000000..f5838813b5 --- /dev/null +++ b/src/ripple/app/rdb/RelationalDBInterface_postgres.h @@ -0,0 +1,248 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_POSTGRES_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACE_POSTGRES_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +class PgPool; + +using AccountTxMarker = RelationalDBInterface::AccountTxMarker; +using AccountTxArgs = RelationalDBInterface::AccountTxArgs; +using AccountTxResult = RelationalDBInterface::AccountTxResult; +using AccountTransactionsData = RelationalDBInterface::AccountTransactionsData; + +/** + * @brief getMinLedgerSeq Returns minimum ledger sequence + * from Postgres database + * @param pgPool Link to postgres database + * @param app Application + * @param j Journal + * @return Minimum ledger sequence if any, none if no ledgers + */ +std::optional +getMinLedgerSeq(std::shared_ptr const& pgPool, beast::Journal j); + +/** + * @brief getMaxLedgerSeq Returns maximum ledger sequence + * from Postgres database + * @param pgPool Link to postgres database + * @param app Application + * @return Maximum ledger sequence if any, none if no ledgers + */ +std::optional +getMaxLedgerSeq(std::shared_ptr const& pgPool); + +/** + * @brief getCompleteLedgers Returns string which contains + * list of completed ledgers + * @param pgPool Link to postgres database + * @param app Application + * @return String with completed ledgers + */ +std::string +getCompleteLedgers(std::shared_ptr const& pgPool); + +/** + * @brief getValidatedLedgerAge Returns age of last + * validated ledger + * @param pgPool Link to postgres database + * @param app Application + * @param j Journal + * @return Age of last validated ledger + */ +std::chrono::seconds +getValidatedLedgerAge(std::shared_ptr const& pgPool, beast::Journal j); + +/** + * @brief getNewestLedgerInfo Load latest ledger info from Postgres + * @param pgPool Link to postgres database + * @param app reference to Application + * @return Ledger info + */ +std::optional +getNewestLedgerInfo(std::shared_ptr const& pgPool, Application& app); + +/** + * @brief getLedgerInfoByIndex Load ledger info by index (AKA sequence) + * from Postgres + * @param pgPool Link to postgres database + * @param 
ledgerIndex the ledger index (or sequence) to load + * @param app reference to Application + * @return Ledger info + */ +std::optional +getLedgerInfoByIndex( + std::shared_ptr const& pgPool, + std::uint32_t ledgerIndex, + Application& app); + +/** + * @brief getLedgerInfoByHash Load ledger info by hash from Postgres + * @param pgPool Link to postgres database + * @param hash Hash of the ledger to load + * @param app reference to Application + * @return Ledger info + */ +std::optional +getLedgerInfoByHash( + std::shared_ptr const& pgPool, + uint256 const& ledgerHash, + Application& app); + +/** + * @brief getHashByIndex Given a ledger sequence, + * return the ledger hash + * @param pgPool Link to postgres database + * @param ledgerIndex Ledger sequence + * @param app Application + * @return Hash of ledger + */ +uint256 +getHashByIndex( + std::shared_ptr const& pgPool, + std::uint32_t ledgerIndex, + Application& app); + +/** + * @brief getHashesByIndex Given a ledger sequence, + * return the ledger hash and the parent hash + * @param pgPool Link to postgres database + * @param ledgerIndex Ledger sequence + * @param[out] ledgerHash Hash of ledger + * @param[out] parentHash Hash of parent ledger + * @param app Application + * @return True if the data was found + */ +bool +getHashesByIndex( + std::shared_ptr const& pgPool, + std::uint32_t ledgerIndex, + uint256& ledgerHash, + uint256& parentHash, + Application& app); + +/** + * @brief getHashesByIndex Given a contiguous range of sequences, + * return a map of sequence -> (hash, parent hash) + * @param pgPool Link to postgres database + * @param minSeq Lower bound of range + * @param maxSeq Upper bound of range + * @param app Application + * @return Mapping of all found ledger sequences to their hash and parent hash + */ +std::map +getHashesByIndex( + std::shared_ptr const& pgPool, + std::uint32_t minSeq, + std::uint32_t maxSeq, + Application& app); + +/** + * @brief getTxHashes Returns vector of tx hashes by given ledger + * sequence + * @param pgPool Link to postgres database + * @param seq Ledger sequence + * @param app Application + * @return Vector of tx hashes + */ +std::vector +getTxHashes( + std::shared_ptr const& pgPool, + LedgerIndex seq, + Application& app); + +/** + * @brief locateTransaction Returns information used to locate + * a transaction. Function is specific to postgres backend. + * @param pgPool Link to postgres database + * @param id Hash of the transaction. + * @param app Application + * @return Information used to locate a transaction. Contains a nodestore + * hash and ledger sequence pair if the transaction was found. + * Otherwise, contains the range of ledgers present in the database + * at the time of search. + */ +Transaction::Locator +locateTransaction( + std::shared_ptr const& pgPool, + uint256 const& id, + Application& app); + +/** + * @brief getTxHistory Returns most recent 20 transactions starting + * from given number or entry. + * @param pgPool Link to postgres database + * @param startIndex First number of returned entry. + * @param app Application + * @param j Journal + * @return Vector of sharded pointers to transactions sorted in + * descending order by ledger sequence. + */ +std::vector> +getTxHistory( + std::shared_ptr const& pgPool, + LedgerIndex startIndex, + Application& app, + beast::Journal j); + +/** + * @brief getAccountTx Get last account transactions specifies by + * passed argumenrs structure. 
+ * @param pgPool Link to postgres database + * @param args Arguments which specify account and whose tx to return. + * @param app Application + * @param j Journal + * @return Vector of account transactions and RPC status of responce. + */ +std::pair +getAccountTx( + std::shared_ptr const& pgPool, + AccountTxArgs const& args, + Application& app, + beast::Journal j); + +/** + * @brief writeLedgerAndTransactions Write new ledger and transaction + * data to Postgres. + * @param pgPool Pool of Postgres connections + * @param info Ledger info to write. + * @param accountTxData Transaction data to write + * @param j Journal (for logging) + * @return True if success, false if failure. + */ +bool +writeLedgerAndTransactions( + std::shared_ptr const& pgPool, + LedgerInfo const& info, + std::vector const& accountTxData, + beast::Journal& j); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface_shards.h b/src/ripple/app/rdb/RelationalDBInterface_shards.h new file mode 100644 index 0000000000..51bbdce56f --- /dev/null +++ b/src/ripple/app/rdb/RelationalDBInterface_shards.h @@ -0,0 +1,202 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_SHARDS_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACE_SHARDS_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { + +struct DatabasePair +{ + std::unique_ptr ledgerDb; + std::unique_ptr transactionDb; +}; + +/* Shard DB */ + +/** + * @brief makeShardCompleteLedgerDBs Opens shard databases for already + * verified shard and returns its descriptors. + * @param config Config object. + * @param setup Path to database and other opening parameters. + * @return Pair of unique pointers to opened ledger and transaction databases. + */ +DatabasePair +makeShardCompleteLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup); + +/** + * @brief makeShardIncompleteLedgerDBs Opens shard databases for not + * fully downloaded or verified shard and returns its descriptors. + * @param config Config object. + * @param setup Path to database and other opening parameters. + * @param checkpointerSetup Checkpointer parameters. + * @return Pair of unique pointers to opened ledger and transaction databases. + */ +DatabasePair +makeShardIncompleteLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup); + +/** + * @brief updateLedgerDBs Save given ledger to shard databases. + * @param txdb Session with transaction DB. + * @param lgrdb Sessiob with ledger DB. 
+ * @param ledger Ledger to save.
+ * @param index Index of the shard which the ledger belongs to.
+ * @param stop Link to atomic flag which can stop the process if raised.
+ * @param j Journal
+ * @return True if ledger was successfully saved.
+ */
+bool
+updateLedgerDBs(
+    soci::session& txdb,
+    soci::session& lgrdb,
+    std::shared_ptr const& ledger,
+    std::uint32_t index,
+    std::atomic& stop,
+    beast::Journal j);
+
+/* Shard acquire DB */
+
+/**
+ * @brief makeAcquireDB Opens shard acquire DB and returns its descriptor.
+ * @param setup Path to DB and other opening parameters.
+ * @param checkpointerSetup Checkpointer parameters.
+ * @return Unique pointer to opened database.
+ */
+std::unique_ptr
+makeAcquireDB(
+    DatabaseCon::Setup const& setup,
+    DatabaseCon::CheckpointerSetup const& checkpointerSetup);
+
+/**
+ * @brief insertAcquireDBIndex Adds new shard index to shard acquire DB.
+ * @param session Session with database.
+ * @param index Index to add.
+ */
+void
+insertAcquireDBIndex(soci::session& session, std::uint32_t index);
+
+/**
+ * @brief selectAcquireDBLedgerSeqs Returns set of acquired ledgers for
+ * given shard.
+ * @param session Session with database.
+ * @param index Shard index.
+ * @return Pair which contains true if such an index was found in the
+ * database, and a string which contains the set of ledger sequences.
+ * If the set of sequences was not saved then none is returned.
+ */
+std::pair>
+selectAcquireDBLedgerSeqs(soci::session& session, std::uint32_t index);
+
+struct AcquireShardSeqsHash
+{
+    std::optional sequences;
+    std::optional hash;
+};
+
+/**
+ * @brief selectAcquireDBLedgerSeqsHash Returns set of acquired ledgers and
+ * hash for given shard.
+ * @param session Session with database.
+ * @param index Shard index.
+ * @return Pair which contains true if such an index was found in the
+ * database, and the AcquireShardSeqsHash structure which contains a
+ * string with the ledger sequences set and a string with the last
+ * ledger hash. If the set of sequences or the hash was not saved
+ * then none is returned.
+ */
+std::pair
+selectAcquireDBLedgerSeqsHash(soci::session& session, std::uint32_t index);
+
+/**
+ * @brief updateAcquireDB Updates information in acquire DB.
+ * @param session Session with database.
+ * @param ledger Ledger to save into database.
+ * @param index Shard index.
+ * @param lastSeq Last acquired ledger sequence.
+ * @param seqs Current set of acquired ledger sequences if it's not empty.
+ */
+void
+updateAcquireDB(
+    soci::session& session,
+    std::shared_ptr const& ledger,
+    std::uint32_t index,
+    std::uint32_t lastSeq,
+    std::optional const& seqs);
+
+/* Archive DB */
+
+/**
+ * @brief makeArchiveDB Opens shard archive DB and returns its descriptor.
+ * @param dir Path to database to open.
+ * @param dbName Name of database.
+ * @return Unique pointer to opened database.
+ */
+std::unique_ptr
+makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName);
+
+/**
+ * @brief readArchiveDB Reads entries from shard archive database and calls
+ * the given callback for each entry.
+ * @param db Session with database.
+ * @param func Callback to call for each entry.
+ */
+void
+readArchiveDB(
+    DatabaseCon& db,
+    std::function const& func);
+
+/**
+ * @brief insertArchiveDB Adds entry to shard archive database.
+ * @param db Session with database.
+ * @param shardIndex Shard index to add.
+ * @param url Shard download url to add.
+ */ +void +insertArchiveDB( + DatabaseCon& db, + std::uint32_t shardIndex, + std::string const& url); + +/** + * @brief deleteFromArchiveDB Deletes entry from shard archive DB. + * @param db Session with database. + * @param shardIndex Shard index to remove from DB. + */ +void +deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex); + +/** + * @brief dropArchiveDB Removes table in shard archive DB. + * @param db Session with database. + */ +void +dropArchiveDB(DatabaseCon& db); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp b/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp new file mode 100644 index 0000000000..e9926a394d --- /dev/null +++ b/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp @@ -0,0 +1,271 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +class RelationalDBInterfacePostgresImp : public RelationalDBInterfacePostgres +{ +public: + RelationalDBInterfacePostgresImp( + Application& app, + Config const& config, + JobQueue& jobQueue) + : app_(app) + , j_(app_.journal("PgPool")) + , pgPool_( +#ifdef RIPPLED_REPORTING + make_PgPool( + config.section("ledger_tx_tables"), + *dynamic_cast(&app_), + j_) +#endif + ) + { + assert(config.reporting()); +#ifdef RIPPLED_REPORTING + if (config.reporting() && !config.reportingReadOnly()) // use pg + { + initSchema(pgPool_); + } +#endif + } + + void + sweep() override; + + std::optional + getMinLedgerSeq() override; + + std::optional + getMaxLedgerSeq() override; + + std::string + getCompleteLedgers() override; + + std::chrono::seconds + getValidatedLedgerAge() override; + + bool + writeLedgerAndTransactions( + LedgerInfo const& info, + std::vector const& accountTxData) override; + + std::optional + getLedgerInfoByIndex(LedgerIndex ledgerSeq) override; + + std::optional + getNewestLedgerInfo() override; + + std::optional + getLedgerInfoByHash(uint256 const& ledgerHash) override; + + uint256 + getHashByIndex(LedgerIndex ledgerIndex) override; + + std::optional + getHashesByIndex(LedgerIndex ledgerIndex) override; + + std::map + getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override; + + std::vector + getTxHashes(LedgerIndex seq) override; + + std::vector> + getTxHistory(LedgerIndex startIndex) override; + + std::pair + getAccountTx(AccountTxArgs const& args) override; + + 
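+    // Most of the overrides declared above and below are thin wrappers that
+    // forward to the corresponding free functions from
+    // RelationalDBInterface_postgres.h, passing the shared pgPool_ (and,
+    // where needed, app_ and j_); their definitions appear later in this
+    // file.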
Transaction::Locator + locateTransaction(uint256 const& id) override; + + bool + ledgerDbHasSpace(Config const& config) override; + + bool + transactionDbHasSpace(Config const& config) override; + +private: + Application& app_; + beast::Journal j_; + std::shared_ptr pgPool_; + + bool + dbHasSpace(Config const& config); +}; + +void +RelationalDBInterfacePostgresImp::sweep() +{ +#ifdef RIPPLED_REPORTING + pgPool_->idleSweeper(); +#endif +} + +std::optional +RelationalDBInterfacePostgresImp::getMinLedgerSeq() +{ + return ripple::getMinLedgerSeq(pgPool_, j_); +} + +std::optional +RelationalDBInterfacePostgresImp::getMaxLedgerSeq() +{ + return ripple::getMaxLedgerSeq(pgPool_); +} + +std::string +RelationalDBInterfacePostgresImp::getCompleteLedgers() +{ + return ripple::getCompleteLedgers(pgPool_); +} + +std::chrono::seconds +RelationalDBInterfacePostgresImp::getValidatedLedgerAge() +{ + return ripple::getValidatedLedgerAge(pgPool_, j_); +} + +bool +RelationalDBInterfacePostgresImp::writeLedgerAndTransactions( + LedgerInfo const& info, + std::vector const& accountTxData) +{ + return ripple::writeLedgerAndTransactions(pgPool_, info, accountTxData, j_); +} + +std::optional +RelationalDBInterfacePostgresImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) +{ + return ripple::getLedgerInfoByIndex(pgPool_, ledgerSeq, app_); +} + +std::optional +RelationalDBInterfacePostgresImp::getNewestLedgerInfo() +{ + return ripple::getNewestLedgerInfo(pgPool_, app_); +} + +std::optional +RelationalDBInterfacePostgresImp::getLedgerInfoByHash(uint256 const& ledgerHash) +{ + return ripple::getLedgerInfoByHash(pgPool_, ledgerHash, app_); +} + +uint256 +RelationalDBInterfacePostgresImp::getHashByIndex(LedgerIndex ledgerIndex) +{ + return ripple::getHashByIndex(pgPool_, ledgerIndex, app_); +} + +std::optional +RelationalDBInterfacePostgresImp::getHashesByIndex(LedgerIndex ledgerIndex) +{ + LedgerHashPair p; + if (!ripple::getHashesByIndex( + pgPool_, ledgerIndex, p.ledgerHash, p.parentHash, app_)) + return {}; + return p; +} + +std::map +RelationalDBInterfacePostgresImp::getHashesByIndex( + LedgerIndex minSeq, + LedgerIndex maxSeq) +{ + return ripple::getHashesByIndex(pgPool_, minSeq, maxSeq, app_); +} + +std::vector +RelationalDBInterfacePostgresImp::getTxHashes(LedgerIndex seq) +{ + return ripple::getTxHashes(pgPool_, seq, app_); +} + +std::vector> +RelationalDBInterfacePostgresImp::getTxHistory(LedgerIndex startIndex) +{ + return ripple::getTxHistory(pgPool_, startIndex, app_, j_); +} + +std::pair +RelationalDBInterfacePostgresImp::getAccountTx(AccountTxArgs const& args) +{ + return ripple::getAccountTx(pgPool_, args, app_, j_); +} + +Transaction::Locator +RelationalDBInterfacePostgresImp::locateTransaction(uint256 const& id) +{ + return ripple::locateTransaction(pgPool_, id, app_); +} + +bool +RelationalDBInterfacePostgresImp::dbHasSpace(Config const& config) +{ + /* Postgres server could be running on a different machine. 
*/ + + return true; +} + +bool +RelationalDBInterfacePostgresImp::ledgerDbHasSpace(Config const& config) +{ + return dbHasSpace(config); +} + +bool +RelationalDBInterfacePostgresImp::transactionDbHasSpace(Config const& config) +{ + return dbHasSpace(config); +} + +std::unique_ptr +getRelationalDBInterfacePostgres( + Application& app, + Config const& config, + JobQueue& jobQueue) +{ + return std::make_unique( + app, config, jobQueue); +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.h b/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.h new file mode 100644 index 0000000000..62623920b8 --- /dev/null +++ b/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.h @@ -0,0 +1,100 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACEPOSTGRES_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACEPOSTGRES_H_INCLUDED + +#include + +namespace ripple { + +class RelationalDBInterfacePostgres : public RelationalDBInterface +{ +public: + /** + * @brief sweep Sweep the database. Method is specific for postgres backend. + */ + virtual void + sweep() = 0; + + /** + * @brief getCompleteLedgers Returns string which contains list of + * completed ledgers. Method is specific for postgres backend. + * @return String with completed ledger numbers + */ + virtual std::string + getCompleteLedgers() = 0; + + /** + * @brief getValidatedLedgerAge Returns age of last + * validated ledger. Method is specific for postgres backend. + * @return Age of last validated ledger in seconds + */ + virtual std::chrono::seconds + getValidatedLedgerAge() = 0; + + /** + * @brief writeLedgerAndTransactions Write new ledger and transaction data + * into database. Method is specific for Postgres backend. + * @param info Ledger info to write. + * @param accountTxData Transaction data to write + * @return True if success, false if failure. + */ + virtual bool + writeLedgerAndTransactions( + LedgerInfo const& info, + std::vector const& accountTxData) = 0; + + /** + * @brief getTxHashes Returns vector of tx hashes by given ledger + * sequence. Method is specific to postgres backend. + * @param seq Ledger sequence + * @return Vector of tx hashes + */ + virtual std::vector + getTxHashes(LedgerIndex seq) = 0; + + /** + * @brief getAccountTx Get last account transactions specifies by + * passed argumenrs structure. Function if specific to postgres + * backend. + * @param args Arguments which specify account and whose tx to return. 
+ * @param app Application + * @param j Journal + * @return Vector of account transactions and RPC status of responce. + */ + virtual std::pair + getAccountTx(AccountTxArgs const& args) = 0; + + /** + * @brief locateTransaction Returns information used to locate + * a transaction. Function is specific to postgres backend. + * @param id Hash of the transaction. + * @return Information used to locate a transaction. Contains a nodestore + * hash and ledger sequence pair if the transaction was found. + * Otherwise, contains the range of ledgers present in the database + * at the time of search. + */ + virtual Transaction::Locator + locateTransaction(uint256 const& id) = 0; +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp b/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp new file mode 100644 index 0000000000..81d422e54f --- /dev/null +++ b/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp @@ -0,0 +1,1445 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite +{ +public: + RelationalDBInterfaceSqliteImp( + Application& app, + Config const& config, + JobQueue& jobQueue) + : app_(app), j_(app_.journal("Ledger")) + { + DatabaseCon::Setup setup = setup_DatabaseCon(config, j_); + auto res = makeLedgerDBs( + config, + setup, + DatabaseCon::CheckpointerSetup{&jobQueue, &app_.logs()}); + if (!res) + { + JLOG(app_.journal("Application").fatal()) + << "AccountTransactions database " + "should not have a primary key"; + Throw(); + } + } + + std::optional + getMinLedgerSeq() override; + + std::optional + getTransactionsMinLedgerSeq() override; + + std::optional + getAccountTransactionsMinLedgerSeq() override; + + std::optional + getMaxLedgerSeq() override; + + void + deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override; + + void + deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override; + + void + deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override; + + void + deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override; + + std::size_t + getTransactionCount() override; + + std::size_t + getAccountTransactionCount() override; + + RelationalDBInterface::CountMinMax + getLedgerCountMinMax() override; + + bool + saveValidatedLedger( + std::shared_ptr const& ledger, + bool current) override; + + std::optional + getLedgerInfoByIndex(LedgerIndex ledgerSeq) override; + + std::optional + getNewestLedgerInfo() override; + + std::optional + getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override; + + std::optional + getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override; + + std::optional + getLedgerInfoByHash(uint256 const& ledgerHash) override; + + uint256 + getHashByIndex(LedgerIndex ledgerIndex) override; + + std::optional + getHashesByIndex(LedgerIndex ledgerIndex) override; + + std::map + getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override; + + std::vector> + getTxHistory(LedgerIndex startIndex) override; + + AccountTxs + getOldestAccountTxs(AccountTxOptions const& options) override; + + AccountTxs + getNewestAccountTxs(AccountTxOptions const& options) override; + + MetaTxsList + getOldestAccountTxsB(AccountTxOptions const& options) override; + + MetaTxsList + getNewestAccountTxsB(AccountTxOptions const& options) override; + + std::pair> + oldestAccountTxPage(AccountTxPageOptions const& options) override; + + std::pair> + newestAccountTxPage(AccountTxPageOptions const& options) override; + + std::pair> + oldestAccountTxPageB(AccountTxPageOptions const& options) override; + + std::pair> + newestAccountTxPageB(AccountTxPageOptions const& options) override; + + std::variant + getTransaction( + uint256 const& id, + std::optional> const& range, + error_code_i& ec) override; + + bool + ledgerDbHasSpace(Config const& config) override; + + bool + transactionDbHasSpace(Config const& config) override; + + int + getKBUsedAll() override; + + int + getKBUsedLedger() override; + + int + getKBUsedTransaction() override; + +private: + Application& app_; + beast::Journal j_; + std::unique_ptr lgrdb_, txdb_; + + /** + * @brief makeLedgerDBs Opens node ledger and transaction databases, + * and saves its descriptors into internal variables. 
+     * @param config Config object.
+     * @param setup Path to database and other opening parameters.
+     * @param checkpointerSetup Checkpointer parameters.
+     * @return True if node databases opened successfully.
+     */
+    bool
+    makeLedgerDBs(
+        Config const& config,
+        DatabaseCon::Setup const& setup,
+        DatabaseCon::CheckpointerSetup const& checkpointerSetup);
+
+    /**
+     * @brief seqToShardIndex Converts ledger sequence to shard index.
+     * @param ledgerSeq Ledger sequence.
+     * @return Shard index.
+     */
+    std::uint32_t
+    seqToShardIndex(LedgerIndex ledgerSeq)
+    {
+        return app_.getShardStore()->seqToShardIndex(ledgerSeq);
+    }
+
+    /**
+     * @brief firstLedgerSeq Returns first ledger sequence for given shard.
+     * @param shardIndex Shard Index.
+     * @return First ledger sequence.
+     */
+    LedgerIndex
+    firstLedgerSeq(std::uint32_t shardIndex)
+    {
+        return app_.getShardStore()->firstLedgerSeq(shardIndex);
+    }
+
+    /**
+     * @brief lastLedgerSeq Returns last ledger sequence for given shard.
+     * @param shardIndex Shard Index.
+     * @return Last ledger sequence.
+     */
+    LedgerIndex
+    lastLedgerSeq(std::uint32_t shardIndex)
+    {
+        return app_.getShardStore()->lastLedgerSeq(shardIndex);
+    }
+
+    /**
+     * @brief existsLedger Checks if node ledger DB exists.
+     * @return True if node ledger DB exists.
+     */
+    bool
+    existsLedger()
+    {
+        return !!lgrdb_;
+    }
+
+    /**
+     * @brief existsTransaction Checks if node transaction DB exists.
+     * @return True if node transaction DB exists.
+     */
+    bool
+    existsTransaction()
+    {
+        return !!txdb_;
+    }
+
+    /**
+     * @brief checkoutLedger Checks out and returns the node ledger DB.
+     * @return Session to node ledger DB.
+     */
+    auto
+    checkoutLedger()
+    {
+        return lgrdb_->checkoutDb();
+    }
+
+    /**
+     * @brief checkoutTransaction Checks out and returns the node
+     * transaction DB.
+     * @return Session to node transaction DB.
+     */
+    auto
+    checkoutTransaction()
+    {
+        return txdb_->checkoutDb();
+    }
+
+    /**
+     * @brief doLedger Checks out the ledger database of the shard
+     * containing the given ledger and calls the given callback, passing
+     * the shard index and a session with the database to it.
+     * @param ledgerSeq Ledger sequence.
+     * @param callback Callback function to call.
+     * @return Value returned by callback function.
+     */
+    bool
+    doLedger(
+        LedgerIndex ledgerSeq,
+        std::function const&
+            callback)
+    {
+        return app_.getShardStore()->callForLedgerSQL(ledgerSeq, callback);
+    }
+
+    /**
+     * @brief doTransaction Checks out the transaction database of the shard
+     * containing the given ledger and calls the given callback, passing
+     * the shard index and a session with the database to it.
+     * @param ledgerSeq Ledger sequence.
+     * @param callback Callback function to call.
+     * @return Value returned by callback function.
+     */
+    bool
+    doTransaction(
+        LedgerIndex ledgerSeq,
+        std::function const&
+            callback)
+    {
+        return app_.getShardStore()->callForTransactionSQL(ledgerSeq, callback);
+    }
+
+    /**
+     * @brief iterateLedgerForward Checks out the ledger databases of
+     * all shards in ascending order, starting from the given shard index,
+     * until the shard with the largest index has been visited or the
+     * callback returns false. For each visited shard, calls the given
+     * callback, passing the shard index and a session with the database
+     * to it.
+     * @param firstIndex Start shard index to visit or none if all shards
+     * should be visited.
+     * @param callback Callback function to call.
+     * @return True if each callback function returned true, false otherwise.
+ */ + bool + iterateLedgerForward( + std::optional firstIndex, + std::function const& + callback) + { + return app_.getShardStore()->iterateLedgerSQLsForward( + firstIndex, callback); + } + + /** + * @brief iterateTransactionForward Checkouts transaction databases for + * all shards in ascending order starting from given shard index + * until shard with the largest index visited or callback returned + * false. For each visited shard calls given callback function + * passing shard index and session with the database to it. + * @param firstIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returned true, false otherwise. + */ + bool + iterateTransactionForward( + std::optional firstIndex, + std::function const& + callback) + { + return app_.getShardStore()->iterateLedgerSQLsForward( + firstIndex, callback); + } + + /** + * @brief iterateLedgerBack Checkouts ledger databases for + * all shards in descending order starting from given shard index + * until shard with the smallest index visited or callback returned + * false. For each visited shard calls given callback function + * passing shard index and session with the database to it. + * @param firstIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returned true, false otherwise. + */ + bool + iterateLedgerBack( + std::optional firstIndex, + std::function const& + callback) + { + return app_.getShardStore()->iterateLedgerSQLsBack( + firstIndex, callback); + } + + /** + * @brief iterateTransactionForward Checkouts transaction databases for + * all shards in descending order starting from given shard index + * until shard with the smallest index visited or callback returned + * false. For each visited shard calls given callback function + * passing shard index and session with the database to it. + * @param firstIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returned true, false otherwise. 
+ */ + bool + iterateTransactionBack( + std::optional firstIndex, + std::function const& + callback) + { + return app_.getShardStore()->iterateLedgerSQLsBack( + firstIndex, callback); + } +}; + +bool +RelationalDBInterfaceSqliteImp::makeLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup) +{ + auto [lgr, tx, res] = + ripple::makeLedgerDBs(config, setup, checkpointerSetup); + txdb_ = std::move(tx); + lgrdb_ = std::move(lgr); + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getMinLedgerSeq() +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getMinLedgerSeq(*db, TableType::Ledgers); + } + + /* else use shard databases */ + std::optional res; + iterateLedgerForward({}, [&](soci::session& session, std::uint32_t index) { + res = ripple::getMinLedgerSeq(session, TableType::Ledgers); + return !res; + }); + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getTransactionsMinLedgerSeq() +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getMinLedgerSeq(*db, TableType::Transactions); + } + + /* else use shard databases */ + std::optional res; + iterateTransactionForward( + {}, [&](soci::session& session, std::uint32_t index) { + res = ripple::getMinLedgerSeq(session, TableType::Transactions); + return !res; + }); + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getAccountTransactionsMinLedgerSeq() +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getMinLedgerSeq(*db, TableType::AccountTransactions); + } + + /* else use shard databases */ + std::optional res; + iterateTransactionForward( + {}, [&](soci::session& session, std::uint32_t index) { + res = ripple::getMinLedgerSeq( + session, TableType::AccountTransactions); + return !res; + }); + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getMaxLedgerSeq() +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getMaxLedgerSeq(*db, TableType::Ledgers); + } + + /* else use shard databases */ + std::optional res; + iterateLedgerBack({}, [&](soci::session& session, std::uint32_t index) { + res = ripple::getMaxLedgerSeq(session, TableType::Ledgers); + return !res; + }); + return res; +} + +void +RelationalDBInterfaceSqliteImp::deleteTransactionByLedgerSeq( + LedgerIndex ledgerSeq) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + ripple::deleteByLedgerSeq(*db, TableType::Transactions, ledgerSeq); + return; + } + + /* else use shard database */ + doTransaction(ledgerSeq, [&](soci::session& session, std::uint32_t index) { + ripple::deleteByLedgerSeq(session, TableType::Transactions, ledgerSeq); + return true; + }); +} + +void +RelationalDBInterfaceSqliteImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + ripple::deleteBeforeLedgerSeq(*db, TableType::Ledgers, ledgerSeq); + return; + } + + /* else use shard databases */ + iterateLedgerBack( + seqToShardIndex(ledgerSeq), + [&](soci::session& session, std::uint32_t index) { + ripple::deleteBeforeLedgerSeq( + session, TableType::Ledgers, ledgerSeq); + return true; + }); +} + +void +RelationalDBInterfaceSqliteImp::deleteTransactionsBeforeLedgerSeq( + LedgerIndex 
ledgerSeq) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + ripple::deleteBeforeLedgerSeq(*db, TableType::Transactions, ledgerSeq); + return; + } + + /* else use shard databases */ + iterateTransactionBack( + seqToShardIndex(ledgerSeq), + [&](soci::session& session, std::uint32_t index) { + ripple::deleteBeforeLedgerSeq( + session, TableType::Transactions, ledgerSeq); + return true; + }); +} + +void +RelationalDBInterfaceSqliteImp::deleteAccountTransactionsBeforeLedgerSeq( + LedgerIndex ledgerSeq) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + ripple::deleteBeforeLedgerSeq( + *db, TableType::AccountTransactions, ledgerSeq); + return; + } + + /* else use shard databases */ + iterateTransactionBack( + seqToShardIndex(ledgerSeq), + [&](soci::session& session, std::uint32_t index) { + ripple::deleteBeforeLedgerSeq( + session, TableType::AccountTransactions, ledgerSeq); + return true; + }); +} + +std::size_t +RelationalDBInterfaceSqliteImp::getTransactionCount() +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getRows(*db, TableType::Transactions); + } + + /* else use shard databases */ + std::size_t rows = 0; + iterateTransactionForward( + {}, [&](soci::session& session, std::uint32_t index) { + rows += ripple::getRows(session, TableType::Transactions); + return true; + }); + return rows; +} + +std::size_t +RelationalDBInterfaceSqliteImp::getAccountTransactionCount() +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getRows(*db, TableType::AccountTransactions); + } + + /* else use shard databases */ + std::size_t rows = 0; + iterateTransactionForward( + {}, [&](soci::session& session, std::uint32_t index) { + rows += ripple::getRows(session, TableType::AccountTransactions); + return true; + }); + return rows; +} + +RelationalDBInterface::CountMinMax +RelationalDBInterfaceSqliteImp::getLedgerCountMinMax() +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getRowsMinMax(*db, TableType::Ledgers); + } + + /* else use shard databases */ + CountMinMax res{0, 0, 0}; + iterateLedgerForward({}, [&](soci::session& session, std::uint32_t index) { + auto r = ripple::getRowsMinMax(session, TableType::Ledgers); + if (r.numberOfRows) + { + res.numberOfRows += r.numberOfRows; + if (res.minLedgerSequence == 0) + res.minLedgerSequence = r.minLedgerSequence; + res.maxLedgerSequence = r.maxLedgerSequence; + } + return true; + }); + return res; +} + +bool +RelationalDBInterfaceSqliteImp::saveValidatedLedger( + std::shared_ptr const& ledger, + bool current) +{ + /* if databases exists, use it */ + if (existsLedger() && existsTransaction()) + { + return ripple::saveValidatedLedger( + *lgrdb_, *txdb_, app_, ledger, current); + } + + /* Todo: use shard databases. Skipped in this PR by propose of Mickey + * Portilla. 
*/ + + return false; +} + +std::optional +RelationalDBInterfaceSqliteImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getLedgerInfoByIndex(*db, ledgerSeq, j_); + } + + /* else use shard database */ + std::optional res; + doLedger(ledgerSeq, [&](soci::session& session, std::uint32_t index) { + res = ripple::getLedgerInfoByIndex(session, ledgerSeq, j_); + return true; + }); + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getNewestLedgerInfo() +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getNewestLedgerInfo(*db, j_); + } + + /* else use shard databases */ + std::optional res; + iterateLedgerBack({}, [&](soci::session& session, std::uint32_t index) { + if (auto info = ripple::getNewestLedgerInfo(session, j_)) + { + res = info; + return false; + } + return true; + }); + + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getLimitedOldestLedgerInfo( + LedgerIndex ledgerFirstIndex) +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getLimitedOldestLedgerInfo(*db, ledgerFirstIndex, j_); + } + + /* else use shard databases */ + std::optional res; + iterateLedgerForward( + seqToShardIndex(ledgerFirstIndex), + [&](soci::session& session, std::uint32_t index) { + if (auto info = ripple::getLimitedOldestLedgerInfo( + session, ledgerFirstIndex, j_)) + { + res = info; + return false; + } + return true; + }); + + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getLimitedNewestLedgerInfo( + LedgerIndex ledgerFirstIndex) +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getLimitedNewestLedgerInfo(*db, ledgerFirstIndex, j_); + } + + /* else use shard databases */ + std::optional res; + iterateLedgerBack({}, [&](soci::session& session, std::uint32_t index) { + if (auto info = ripple::getLimitedNewestLedgerInfo( + session, ledgerFirstIndex, j_)) + { + res = info; + return false; + } + if (index < seqToShardIndex(ledgerFirstIndex)) + return false; + return true; + }); + + return res; +} + +std::optional +RelationalDBInterfaceSqliteImp::getLedgerInfoByHash(uint256 const& ledgerHash) +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getLedgerInfoByHash(*db, ledgerHash, j_); + } + + /* else use shard databases */ + std::optional res; + iterateLedgerBack({}, [&](soci::session& session, std::uint32_t index) { + if (auto info = ripple::getLedgerInfoByHash(session, ledgerHash, j_)) + { + res = info; + return false; + } + return true; + }); + + return res; +} + +uint256 +RelationalDBInterfaceSqliteImp::getHashByIndex(LedgerIndex ledgerIndex) +{ + /* if database exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getHashByIndex(*db, ledgerIndex); + } + + /* else use shard database */ + uint256 hash; + doLedger(ledgerIndex, [&](soci::session& session, std::uint32_t index) { + hash = ripple::getHashByIndex(session, ledgerIndex); + return true; + }); + return hash; +} + +std::optional +RelationalDBInterfaceSqliteImp::getHashesByIndex(LedgerIndex ledgerIndex) +{ + /* if database exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getHashesByIndex(*db, ledgerIndex, j_); + } + + /* else use shard database */ + std::optional res; + 
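+    // No node-wide ledger database is open here, so consult the shard store:
+    // doLedger() checks out the SQLite ledger database of the single shard
+    // that contains ledgerIndex and runs the lambda below against its session.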
doLedger(ledgerIndex, [&](soci::session& session, std::uint32_t index) { + res = ripple::getHashesByIndex(session, ledgerIndex, j_); + return true; + }); + return res; +} + +std::map +RelationalDBInterfaceSqliteImp::getHashesByIndex( + LedgerIndex minSeq, + LedgerIndex maxSeq) +{ + /* if database exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::getHashesByIndex(*db, minSeq, maxSeq, j_); + } + + /* else use shard databases */ + std::map res; + while (minSeq <= maxSeq) + { + LedgerIndex shardMaxSeq = lastLedgerSeq(seqToShardIndex(minSeq)); + if (shardMaxSeq > maxSeq) + shardMaxSeq = maxSeq; + doLedger(minSeq, [&](soci::session& session, std::uint32_t index) { + auto r = ripple::getHashesByIndex(session, minSeq, shardMaxSeq, j_); + res.insert(r.begin(), r.end()); + return true; + }); + minSeq = shardMaxSeq + 1; + } + + return res; +} + +std::vector> +RelationalDBInterfaceSqliteImp::getTxHistory(LedgerIndex startIndex) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getTxHistory(*db, app_, startIndex, 20, false).first; + } + + /* else use shard databases */ + std::vector> txs; + int quantity = 20; + iterateTransactionBack( + {}, [&](soci::session& session, std::uint32_t index) { + auto [tx, total] = + ripple::getTxHistory(session, app_, startIndex, quantity, true); + txs.insert(txs.end(), tx.begin(), tx.end()); + if (total > 0) + { + quantity -= total; + if (quantity <= 0) + return false; + startIndex = 0; + } + else + { + startIndex += total; + } + return true; + }); + + return txs; +} + +RelationalDBInterface::AccountTxs +RelationalDBInterfaceSqliteImp::getOldestAccountTxs( + AccountTxOptions const& options) +{ + LedgerMaster& ledgerMaster = app_.getLedgerMaster(); + + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getOldestAccountTxs( + *db, app_, ledgerMaster, options, {}, j_) + .first; + } + + /* else use shard databases */ + AccountTxs ret; + AccountTxOptions opt = options; + int limit_used = 0; + iterateTransactionForward( + opt.minLedger ? seqToShardIndex(opt.minLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.maxLedger && index > seqToShardIndex(opt.maxLedger)) + return false; + auto [r, total] = ripple::getOldestAccountTxs( + session, app_, ledgerMaster, opt, limit_used, j_); + ret.insert(ret.end(), r.begin(), r.end()); + if (!total) + return false; + if (total > 0) + { + limit_used += total; + opt.offset = 0; + } + else + { + total = ~total; + if (opt.offset <= total) + opt.offset = 0; + else + opt.offset -= total; + } + return true; + }); + + return ret; +} + +RelationalDBInterface::AccountTxs +RelationalDBInterfaceSqliteImp::getNewestAccountTxs( + AccountTxOptions const& options) +{ + LedgerMaster& ledgerMaster = app_.getLedgerMaster(); + + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getNewestAccountTxs( + *db, app_, ledgerMaster, options, {}, j_) + .first; + } + + /* else use shard databases */ + AccountTxs ret; + AccountTxOptions opt = options; + int limit_used = 0; + iterateTransactionBack( + opt.maxLedger ? 
seqToShardIndex(opt.maxLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.minLedger && index < seqToShardIndex(opt.minLedger)) + return false; + auto [r, total] = ripple::getNewestAccountTxs( + session, app_, ledgerMaster, opt, limit_used, j_); + ret.insert(ret.end(), r.begin(), r.end()); + if (!total) + return false; + if (total > 0) + { + limit_used += total; + opt.offset = 0; + } + else + { + total = ~total; + if (opt.offset <= total) + opt.offset = 0; + else + opt.offset -= total; + } + return true; + }); + + return ret; +} + +RelationalDBInterface::MetaTxsList +RelationalDBInterfaceSqliteImp::getOldestAccountTxsB( + AccountTxOptions const& options) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getOldestAccountTxsB(*db, app_, options, {}, j_).first; + } + + /* else use shard databases */ + MetaTxsList ret; + AccountTxOptions opt = options; + int limit_used = 0; + iterateTransactionForward( + opt.minLedger ? seqToShardIndex(opt.minLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.maxLedger && index > seqToShardIndex(opt.maxLedger)) + return false; + auto [r, total] = ripple::getOldestAccountTxsB( + session, app_, opt, limit_used, j_); + ret.insert(ret.end(), r.begin(), r.end()); + if (!total) + return false; + if (total > 0) + { + limit_used += total; + opt.offset = 0; + } + else + { + total = ~total; + if (opt.offset <= total) + opt.offset = 0; + else + opt.offset -= total; + } + return true; + }); + + return ret; +} + +RelationalDBInterface::MetaTxsList +RelationalDBInterfaceSqliteImp::getNewestAccountTxsB( + AccountTxOptions const& options) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getNewestAccountTxsB(*db, app_, options, {}, j_).first; + } + + /* else use shard databases */ + MetaTxsList ret; + AccountTxOptions opt = options; + int limit_used = 0; + iterateTransactionBack( + opt.maxLedger ? 
seqToShardIndex(opt.maxLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.minLedger && index < seqToShardIndex(opt.minLedger)) + return false; + auto [r, total] = ripple::getNewestAccountTxsB( + session, app_, opt, limit_used, j_); + ret.insert(ret.end(), r.begin(), r.end()); + if (!total) + return false; + if (total > 0) + { + limit_used += total; + opt.offset = 0; + } + else + { + total = ~total; + if (opt.offset <= total) + opt.offset = 0; + else + opt.offset -= total; + } + return true; + }); + + return ret; +} + +std::pair< + RelationalDBInterface::AccountTxs, + std::optional> +RelationalDBInterfaceSqliteImp::oldestAccountTxPage( + AccountTxPageOptions const& options) +{ + static std::uint32_t const page_length(200); + auto& idCache = app_.accountIDCache(); + auto onUnsavedLedger = + std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); + AccountTxs ret; + Application& app = app_; + auto onTransaction = [&ret, &app]( + std::uint32_t ledger_index, + std::string const& status, + Blob&& rawTxn, + Blob&& rawMeta) { + convertBlobsToTxResult(ret, ledger_index, status, rawTxn, rawMeta, app); + }; + + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + auto newmarker = ripple::oldestAccountTxPage( + *db, + idCache, + onUnsavedLedger, + onTransaction, + options, + 0, + page_length) + .first; + return {ret, newmarker}; + } + + /* else use shard databases */ + AccountTxPageOptions opt = options; + int limit_used = 0; + iterateTransactionForward( + opt.minLedger ? seqToShardIndex(opt.minLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.maxLedger != UINT32_MAX && + index > seqToShardIndex(opt.minLedger)) + return false; + auto [marker, total] = ripple::oldestAccountTxPage( + session, + idCache, + onUnsavedLedger, + onTransaction, + opt, + limit_used, + page_length); + opt.marker = marker; + if (total < 0) + return false; + limit_used += total; + return true; + }); + + return {ret, opt.marker}; +} + +std::pair< + RelationalDBInterface::AccountTxs, + std::optional> +RelationalDBInterfaceSqliteImp::newestAccountTxPage( + AccountTxPageOptions const& options) +{ + static std::uint32_t const page_length(200); + auto& idCache = app_.accountIDCache(); + auto onUnsavedLedger = + std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); + AccountTxs ret; + Application& app = app_; + auto onTransaction = [&ret, &app]( + std::uint32_t ledger_index, + std::string const& status, + Blob&& rawTxn, + Blob&& rawMeta) { + convertBlobsToTxResult(ret, ledger_index, status, rawTxn, rawMeta, app); + }; + + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + auto newmarker = ripple::newestAccountTxPage( + *db, + idCache, + onUnsavedLedger, + onTransaction, + options, + 0, + page_length) + .first; + return {ret, newmarker}; + } + + /* else use shard databases */ + AccountTxPageOptions opt = options; + int limit_used = 0; + iterateTransactionBack( + opt.maxLedger != UINT32_MAX ? 
seqToShardIndex(opt.maxLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.minLedger && index < seqToShardIndex(opt.minLedger)) + return false; + auto [marker, total] = ripple::newestAccountTxPage( + session, + idCache, + onUnsavedLedger, + onTransaction, + opt, + limit_used, + page_length); + opt.marker = marker; + if (total < 0) + return false; + limit_used += total; + return true; + }); + + return {ret, opt.marker}; +} + +std::pair< + RelationalDBInterface::MetaTxsList, + std::optional> +RelationalDBInterfaceSqliteImp::oldestAccountTxPageB( + AccountTxPageOptions const& options) +{ + static std::uint32_t const page_length(500); + auto& idCache = app_.accountIDCache(); + auto onUnsavedLedger = + std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); + MetaTxsList ret; + auto onTransaction = [&ret]( + std::uint32_t ledgerIndex, + std::string const& status, + Blob&& rawTxn, + Blob&& rawMeta) { + ret.emplace_back(std::move(rawTxn), std::move(rawMeta), ledgerIndex); + }; + + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + auto newmarker = ripple::oldestAccountTxPage( + *db, + idCache, + onUnsavedLedger, + onTransaction, + options, + 0, + page_length) + .first; + return {ret, newmarker}; + } + + /* else use shard databases */ + AccountTxPageOptions opt = options; + int limit_used = 0; + iterateTransactionForward( + opt.minLedger ? seqToShardIndex(opt.minLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.maxLedger != UINT32_MAX && + index > seqToShardIndex(opt.minLedger)) + return false; + auto [marker, total] = ripple::oldestAccountTxPage( + session, + idCache, + onUnsavedLedger, + onTransaction, + opt, + limit_used, + page_length); + opt.marker = marker; + if (total < 0) + return false; + limit_used += total; + return true; + }); + + return {ret, opt.marker}; +} + +std::pair< + RelationalDBInterface::MetaTxsList, + std::optional> +RelationalDBInterfaceSqliteImp::newestAccountTxPageB( + AccountTxPageOptions const& options) +{ + static std::uint32_t const page_length(500); + auto& idCache = app_.accountIDCache(); + auto onUnsavedLedger = + std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); + MetaTxsList ret; + auto onTransaction = [&ret]( + std::uint32_t ledgerIndex, + std::string const& status, + Blob&& rawTxn, + Blob&& rawMeta) { + ret.emplace_back(std::move(rawTxn), std::move(rawMeta), ledgerIndex); + }; + + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + auto newmarker = ripple::newestAccountTxPage( + *db, + idCache, + onUnsavedLedger, + onTransaction, + options, + 0, + page_length) + .first; + return {ret, newmarker}; + } + + /* else use shard databases */ + AccountTxPageOptions opt = options; + int limit_used = 0; + iterateTransactionBack( + opt.maxLedger != UINT32_MAX ? 
seqToShardIndex(opt.maxLedger) + : std::optional(), + [&](soci::session& session, std::uint32_t index) { + if (opt.minLedger && index < seqToShardIndex(opt.minLedger)) + return false; + auto [marker, total] = ripple::newestAccountTxPage( + session, + idCache, + onUnsavedLedger, + onTransaction, + opt, + limit_used, + page_length); + opt.marker = marker; + if (total < 0) + return false; + limit_used += total; + return true; + }); + + return {ret, opt.marker}; +} + +std::variant +RelationalDBInterfaceSqliteImp::getTransaction( + uint256 const& id, + std::optional> const& range, + error_code_i& ec) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::getTransaction(*db, app_, id, range, ec); + } + + /* else use shard databases */ + std::variant res(TxSearched::unknown); + iterateTransactionBack( + {}, [&](soci::session& session, std::uint32_t index) { + std::optional> range1; + if (range) + { + uint32_t low = std::max(range->lower(), firstLedgerSeq(index)); + uint32_t high = std::min(range->upper(), lastLedgerSeq(index)); + if (low <= high) + range1 = ClosedInterval(low, high); + } + res = ripple::getTransaction(session, app_, id, range1, ec); + /* finish iterations if transaction found or error detected */ + return res.index() == 1 && + std::get(res) != TxSearched::unknown; + }); + + return res; +} + +bool +RelationalDBInterfaceSqliteImp::ledgerDbHasSpace(Config const& config) +{ + /* if databases exists, use it */ + if (existsLedger()) + { + auto db = checkoutLedger(); + return ripple::dbHasSpace(*db, config, j_); + } + + /* else use shard databases */ + return iterateLedgerBack( + {}, [&](soci::session& session, std::uint32_t index) { + return ripple::dbHasSpace(session, config, j_); + }); +} + +bool +RelationalDBInterfaceSqliteImp::transactionDbHasSpace(Config const& config) +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + auto db = checkoutTransaction(); + return ripple::dbHasSpace(*db, config, j_); + } + + /* else use shard databases */ + return iterateTransactionBack( + {}, [&](soci::session& session, std::uint32_t index) { + return ripple::dbHasSpace(session, config, j_); + }); +} + +int +RelationalDBInterfaceSqliteImp::getKBUsedAll() +{ + /* if databases exists, use it */ + if (existsLedger()) + { + return ripple::getKBUsedAll(lgrdb_->getSession()); + } + + /* else use shard databases */ + int sum = 0; + iterateLedgerBack({}, [&](soci::session& session, std::uint32_t index) { + sum += ripple::getKBUsedAll(session); + return true; + }); + return sum; +} + +int +RelationalDBInterfaceSqliteImp::getKBUsedLedger() +{ + /* if databases exists, use it */ + if (existsLedger()) + { + return ripple::getKBUsedDB(lgrdb_->getSession()); + } + + /* else use shard databases */ + int sum = 0; + iterateLedgerBack({}, [&](soci::session& session, std::uint32_t index) { + sum += ripple::getKBUsedDB(session); + return true; + }); + return sum; +} + +int +RelationalDBInterfaceSqliteImp::getKBUsedTransaction() +{ + /* if databases exists, use it */ + if (existsTransaction()) + { + return ripple::getKBUsedDB(txdb_->getSession()); + } + + /* else use shard databases */ + int sum = 0; + iterateTransactionBack( + {}, [&](soci::session& session, std::uint32_t index) { + sum += ripple::getKBUsedDB(session); + return true; + }); + return sum; +} + +std::unique_ptr +getRelationalDBInterfaceSqlite( + Application& app, + Config const& config, + JobQueue& jobQueue) +{ + return std::make_unique( + app, config, jobQueue); +} + 
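For orientation, here is a minimal, hypothetical usage sketch (not part of the patch) showing how a caller might reach the SQLite back end through the generic interface and read two of the statistics it exposes. The function name reportTxDbStats and the include paths are assumptions inferred from the file layout; RelationalDBInterface::init, RelationalDBInterfaceSqlite, getTransactionCount, and getKBUsedTransaction are taken from the interfaces introduced in this change set.

// Illustrative sketch only -- not part of the change set.
#include <ripple/app/rdb/RelationalDBInterface.h>                // assumed path
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>  // assumed path

namespace ripple {

void
reportTxDbStats(
    Application& app,
    Config const& config,
    JobQueue& jobQueue,
    beast::Journal j)
{
    // For non-reporting nodes, init() selects the SQLite implementation
    // produced by getRelationalDBInterfaceSqlite() above.
    auto rdb = RelationalDBInterface::init(app, config, jobQueue);
    if (!rdb)
        return;

    // The statistics accessors are declared on the SQLite-specific
    // interface, so downcast from the generic base class.
    if (auto* sqlite = dynamic_cast<RelationalDBInterfaceSqlite*>(rdb.get()))
    {
        JLOG(j.info()) << "Transactions rows: "
                       << sqlite->getTransactionCount()
                       << ", transaction DB size: "
                       << sqlite->getKBUsedTransaction() << " KB";
    }
}

}  // namespace ripple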
+} // namespace ripple diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h b/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h new file mode 100644 index 0000000000..bb2dd0e41d --- /dev/null +++ b/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h @@ -0,0 +1,290 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_CORE_RELATIONALDBINTERFACESQLITE_H_INCLUDED +#define RIPPLE_CORE_RELATIONALDBINTERFACESQLITE_H_INCLUDED + +#include + +namespace ripple { + +class RelationalDBInterfaceSqlite : public RelationalDBInterface +{ +public: + /** + * @brief getTransactionsMinLedgerSeq Returns minimum ledger sequence + * among records in the Transactions table. + * @return Ledger sequence or none if no ledgers exist. + */ + virtual std::optional + getTransactionsMinLedgerSeq() = 0; + + /** + * @brief getAccountTransactionsMinLedgerSeq Returns minimum ledger + * sequence among records in the AccountTransactions table. + * @return Ledger sequence or none if no ledgers exist. + */ + virtual std::optional + getAccountTransactionsMinLedgerSeq() = 0; + + /** + * @brief deleteTransactionByLedgerSeq Deletes transactions from ledger + * with given sequence. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief deleteBeforeLedgerSeq Deletes all ledgers with given sequence + * and all sequences below it. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief deleteTransactionsBeforeLedgerSeq Deletes all transactions with + * given ledger sequence and all sequences below it. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief deleteAccountTransactionsBeforeLedgerSeq Deletes all account + * transactions with given ledger sequence and all sequences + * below it. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief getTransactionCount Returns number of transactions. + * @return Number of transactions. + */ + virtual std::size_t + getTransactionCount() = 0; + + /** + * @brief getAccountTransactionCount Returns number of account + * transactions. + * @return Number of account transactions. + */ + virtual std::size_t + getAccountTransactionCount() = 0; + + /** + * @brief getLedgerCountMinMax Returns minumum ledger sequence, + * maximum ledger sequence and total number of saved ledgers. 
+     * @return Struct CountMinMax which contains the minimum sequence,
+     *         maximum sequence and number of ledgers.
+     */
+    virtual struct CountMinMax
+    getLedgerCountMinMax() = 0;
+
+    /**
+     * @brief saveValidatedLedger Saves ledger into database.
+     * @param ledger The ledger.
+     * @param current True if ledger is current.
+     * @return True if saving was successful.
+     */
+    virtual bool
+    saveValidatedLedger(
+        std::shared_ptr const& ledger,
+        bool current) = 0;
+
+    /**
+     * @brief getLimitedOldestLedgerInfo Returns info of oldest ledger
+     *        from ledgers with sequences greater than or equal to the
+     *        given one.
+     * @param ledgerFirstIndex Minimum ledger sequence.
+     * @return Ledger info or none if ledger not found.
+     */
+    virtual std::optional
+    getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) = 0;
+
+    /**
+     * @brief getLimitedNewestLedgerInfo Returns info of newest ledger
+     *        from ledgers with sequences greater than or equal to the
+     *        given one.
+     * @param ledgerFirstIndex Minimum ledger sequence.
+     * @return Ledger info or none if ledger not found.
+     */
+    virtual std::optional
+    getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) = 0;
+
+    /**
+     * @brief getOldestAccountTxs Returns oldest transactions for given
+     *        account which match given criteria starting from given offset.
+     * @param options Struct AccountTxOptions which contain criteria to match:
+     *        the account, minimum and maximum ledger numbers to search,
+     *        offset of first entry to return, number of transactions to
+     *        return, flag if this number unlimited.
+     * @return Vector of pairs of found transactions and their metadata
+     *         sorted in ascending order by account sequence.
+     */
+    virtual AccountTxs
+    getOldestAccountTxs(AccountTxOptions const& options) = 0;
+
+    /**
+     * @brief getNewestAccountTxs Returns newest transactions for given
+     *        account which match given criteria starting from given offset.
+     * @param options Struct AccountTxOptions which contain criteria to match:
+     *        the account, minimum and maximum ledger numbers to search,
+     *        offset of first entry to return, number of transactions to
+     *        return, flag if this number unlimited.
+     * @return Vector of pairs of found transactions and their metadata
+     *         sorted in descending order by account sequence.
+     */
+    virtual AccountTxs
+    getNewestAccountTxs(AccountTxOptions const& options) = 0;
+
+    /**
+     * @brief getOldestAccountTxsB Returns oldest transactions in binary form
+     *        for given account which match given criteria starting from given
+     *        offset.
+     * @param options Struct AccountTxOptions which contain criteria to match:
+     *        the account, minimum and maximum ledger numbers to search,
+     *        offset of first entry to return, number of transactions to
+     *        return, flag if this number unlimited.
+     * @return Vector of tuples of found transactions, their metadata and
+     *         account sequences sorted in ascending order by account
+     *         sequence.
+     */
+    virtual MetaTxsList
+    getOldestAccountTxsB(AccountTxOptions const& options) = 0;
+
+    /**
+     * @brief getNewestAccountTxsB Returns newest transactions in binary form
+     *        for given account which match given criteria starting from given
+     *        offset.
+     * @param options Struct AccountTxOptions which contain criteria to match:
+     *        the account, minimum and maximum ledger numbers to search,
+     *        offset of first entry to return, number of transactions to
+     *        return, flag if this number unlimited.
+     * @return Vector of tuples of found transactions, their metadata and
+     *         account sequences sorted in descending order by account
+     *         sequence.
+     */
+    virtual MetaTxsList
+    getNewestAccountTxsB(AccountTxOptions const& options) = 0;
+
+    /**
+     * @brief oldestAccountTxPage Returns oldest transactions for given
+     *        account which match given criteria starting from given marker.
+     * @param options Struct AccountTxPageOptions which contain criteria to
+     *        match: the account, minimum and maximum ledger numbers to search,
+     *        marker of first returned entry, number of transactions to return,
+     *        flag if this number unlimited.
+     * @return Vector of pairs of found transactions and their metadata
+     *         sorted in ascending order by account sequence, and a marker
+     *         for the next search if the search is not finished.
+     */
+    virtual std::pair>
+    oldestAccountTxPage(AccountTxPageOptions const& options) = 0;
+
+    /**
+     * @brief newestAccountTxPage Returns newest transactions for given
+     *        account which match given criteria starting from given marker.
+     * @param options Struct AccountTxPageOptions which contain criteria to
+     *        match: the account, minimum and maximum ledger numbers to search,
+     *        marker of first returned entry, number of transactions to return,
+     *        flag if this number unlimited.
+     * @return Vector of pairs of found transactions and their metadata
+     *         sorted in descending order by account sequence, and a marker
+     *         for the next search if the search is not finished.
+     */
+    virtual std::pair>
+    newestAccountTxPage(AccountTxPageOptions const& options) = 0;
+
+    /**
+     * @brief oldestAccountTxPageB Returns oldest transactions in binary form
+     *        for given account which match given criteria starting from given
+     *        marker.
+     * @param options Struct AccountTxPageOptions which contain criteria to
+     *        match: the account, minimum and maximum ledger numbers to search,
+     *        marker of first returned entry, number of transactions to return,
+     *        flag if this number unlimited.
+     * @return Vector of tuples of found transactions, their metadata and
+     *         account sequences sorted in ascending order by account
+     *         sequence, and a marker for the next search if the search is not
+     *         finished.
+     */
+    virtual std::pair>
+    oldestAccountTxPageB(AccountTxPageOptions const& options) = 0;
+
+    /**
+     * @brief newestAccountTxPageB Returns newest transactions in binary form
+     *        for given account which match given criteria starting from given
+     *        marker.
+     * @param options Struct AccountTxPageOptions which contain criteria to
+     *        match: the account, minimum and maximum ledger numbers to search,
+     *        marker of first returned entry, number of transactions to return,
+     *        flag if this number unlimited.
+     * @return Vector of tuples of found transactions, their metadata and
+     *         account sequences sorted in descending order by account
+     *         sequence, and a marker for the next search if the search is not
+     *         finished.
+     */
+    virtual std::pair>
+    newestAccountTxPageB(AccountTxPageOptions const& options) = 0;
+
+    /**
+     * @brief getTransaction Returns transaction with given hash. If not found
+     *        and a range is given, then check whether all ledgers from the
+     *        range are present in the database.
+     * @param id Hash of the transaction.
+     * @param range Range of ledgers to check, if present.
+     * @param ec Default value of error code.
+     * @return Transaction and its metadata if found, TxSearched::all if range
+     *         given and all ledgers from range are present in the database,
+     *         TxSearched::some if range given and not all ledgers are present,
+     *         TxSearched::unknown if the range was not given or a
+     *         deserialization error occurred. In the last case the error code
+     *         is returned via the ec parameter; in the other cases the default
+     *         error code is not changed.
+ */ + virtual std::variant + getTransaction( + uint256 const& id, + std::optional> const& range, + error_code_i& ec) = 0; + + /** + * @brief getKBUsedAll Returns space used by all databases. + * @return Space in kilobytes. + */ + virtual int + getKBUsedAll() = 0; + + /** + * @brief getKBUsedLedger Returns space used by ledger database. + * @return Space in kilobytes. + */ + virtual int + getKBUsedLedger() = 0; + + /** + * @brief getKBUsedTransaction Returns space used by transaction + * database. + * @return Space in kilobytes. + */ + virtual int + getKBUsedTransaction() = 0; +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface.cpp new file mode 100644 index 0000000000..bf49bf3c95 --- /dev/null +++ b/src/ripple/app/rdb/impl/RelationalDBInterface.cpp @@ -0,0 +1,87 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +namespace ripple { + +extern std::unique_ptr +getRelationalDBInterfaceSqlite( + Application& app, + Config const& config, + JobQueue& jobQueue); + +extern std::unique_ptr +getRelationalDBInterfacePostgres( + Application& app, + Config const& config, + JobQueue& jobQueue); + +std::unique_ptr +RelationalDBInterface::init( + Application& app, + Config const& config, + JobQueue& jobQueue) +{ + bool use_sqlite = false; + bool use_postgres = false; + + if (config.reporting()) + { + use_postgres = true; + } + else + { + const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)}; + if (!rdb_section.empty()) + { + if (boost::iequals( + get(rdb_section, "backend"), "sqlite")) + { + use_sqlite = true; + } + else + { + Throw( + "Invalid rdb_section backend value: " + + get(rdb_section, "backend")); + } + } + else + { + use_sqlite = true; + } + } + + if (use_sqlite) + { + return getRelationalDBInterfaceSqlite(app, config, jobQueue); + } + else if (use_postgres) + { + return getRelationalDBInterfacePostgres(app, config, jobQueue); + } + + return std::unique_ptr(); +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp new file mode 100644 index 0000000000..af98cac0f7 --- /dev/null +++ b/src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp @@ -0,0 +1,830 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +/* Wallet DB */ + +std::unique_ptr +makeWalletDB(DatabaseCon::Setup const& setup) +{ + // wallet database + return std::make_unique( + setup, WalletDBName, std::array(), WalletDBInit); +} + +std::unique_ptr +makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname) +{ + // wallet database + return std::make_unique( + setup, dbname.data(), std::array(), WalletDBInit); +} + +void +getManifests( + soci::session& session, + std::string const& dbTable, + ManifestCache& mCache, + beast::Journal j) +{ + // Load manifests stored in database + std::string const sql = "SELECT RawData FROM " + dbTable + ";"; + soci::blob sociRawData(session); + soci::statement st = (session.prepare << sql, soci::into(sociRawData)); + st.execute(); + while (st.fetch()) + { + std::string serialized; + convert(sociRawData, serialized); + if (auto mo = deserializeManifest(serialized)) + { + if (!mo->verify()) + { + JLOG(j.warn()) << "Unverifiable manifest in db"; + continue; + } + + mCache.applyManifest(std::move(*mo)); + } + else + { + JLOG(j.warn()) << "Malformed manifest in database"; + } + } +} + +static void +saveManifest( + soci::session& session, + std::string const& dbTable, + std::string const& serialized) +{ + // soci does not support bulk insertion of blob data + // Do not reuse blob because manifest ecdsa signatures vary in length + // but blob write length is expected to be >= the last write + soci::blob rawData(session); + convert(serialized, rawData); + session << "INSERT INTO " << dbTable << " (RawData) VALUES (:rawData);", + soci::use(rawData); +} + +void +saveManifests( + soci::session& session, + std::string const& dbTable, + std::function isTrusted, + hash_map const& map, + beast::Journal j) +{ + soci::transaction tr(session); + session << "DELETE FROM " << dbTable; + for (auto const& v : map) + { + // Save all revocation manifests, + // but only save trusted non-revocation manifests. + if (!v.second.revoked() && !isTrusted(v.second.masterKey)) + { + JLOG(j.info()) << "Untrusted manifest in cache not saved to db"; + continue; + } + + saveManifest(session, dbTable, v.second.serialized); + } + tr.commit(); +} + +void +addValidatorManifest(soci::session& session, std::string const& serialized) +{ + soci::transaction tr(session); + saveManifest(session, "ValidatorManifests", serialized); + tr.commit(); +} + +std::pair +getNodeIdentity(soci::session& session) +{ + { + // SOCI requires boost::optional (not std::optional) as the parameter. 
+ boost::optional pubKO, priKO; + soci::statement st = + (session.prepare + << "SELECT PublicKey, PrivateKey FROM NodeIdentity;", + soci::into(pubKO), + soci::into(priKO)); + st.execute(); + while (st.fetch()) + { + auto const sk = parseBase58( + TokenType::NodePrivate, priKO.value_or("")); + auto const pk = parseBase58( + TokenType::NodePublic, pubKO.value_or("")); + + // Only use if the public and secret keys are a pair + if (sk && pk && (*pk == derivePublicKey(KeyType::secp256k1, *sk))) + return {*pk, *sk}; + } + } + + // If a valid identity wasn't found, we randomly generate a new one: + auto [newpublicKey, newsecretKey] = randomKeyPair(KeyType::secp256k1); + + session << str( + boost::format("INSERT INTO NodeIdentity (PublicKey,PrivateKey) " + "VALUES ('%s','%s');") % + toBase58(TokenType::NodePublic, newpublicKey) % + toBase58(TokenType::NodePrivate, newsecretKey)); + + return {newpublicKey, newsecretKey}; +} + +std::unordered_set, KeyEqual> +getPeerReservationTable(soci::session& session, beast::Journal j) +{ + std::unordered_set, KeyEqual> table; + // These values must be boost::optionals (not std) because SOCI expects + // boost::optionals. + boost::optional valPubKey, valDesc; + // We should really abstract the table and column names into constants, + // but no one else does. Because it is too tedious? It would be easy if we + // had a jOOQ for C++. + soci::statement st = + (session.prepare + << "SELECT PublicKey, Description FROM PeerReservations;", + soci::into(valPubKey), + soci::into(valDesc)); + st.execute(); + while (st.fetch()) + { + if (!valPubKey || !valDesc) + { + // This represents a `NULL` in a `NOT NULL` column. It should be + // unreachable. + continue; + } + auto const optNodeId = + parseBase58(TokenType::NodePublic, *valPubKey); + if (!optNodeId) + { + JLOG(j.warn()) << "load: not a public key: " << valPubKey; + continue; + } + table.insert(PeerReservation{*optNodeId, *valDesc}); + } + + return table; +} + +void +insertPeerReservation( + soci::session& session, + PublicKey const& nodeId, + std::string const& description) +{ + session << "INSERT INTO PeerReservations (PublicKey, Description) " + "VALUES (:nodeId, :desc) " + "ON CONFLICT (PublicKey) DO UPDATE SET " + "Description=excluded.Description", + soci::use(toBase58(TokenType::NodePublic, nodeId)), + soci::use(description); +} + +void +deletePeerReservation(soci::session& session, PublicKey const& nodeId) +{ + session << "DELETE FROM PeerReservations WHERE PublicKey = :nodeId", + soci::use(toBase58(TokenType::NodePublic, nodeId)); +} + +bool +createFeatureVotes(soci::session& session) +{ + soci::transaction tr(session); + std::string sql = + "SELECT count(*) FROM sqlite_master " + "WHERE type='table' AND name='FeatureVotes'"; + // SOCI requires boost::optional (not std::optional) as the parameter. 
+ boost::optional featureVotesCount; + session << sql, soci::into(featureVotesCount); + bool exists = static_cast(*featureVotesCount); + + // Create FeatureVotes table in WalletDB if it doesn't exist + if (!exists) + { + session << "CREATE TABLE FeatureVotes ( " + "AmendmentHash CHARACTER(64) NOT NULL, " + "AmendmentName TEXT, " + "Veto INTEGER NOT NULL );"; + tr.commit(); + } + return exists; +} + +void +readAmendments( + soci::session& session, + std::function amendment_hash, + boost::optional amendment_name, + boost::optional vote_to_veto)> const& callback) +{ + soci::transaction tr(session); + std::string sql = + "SELECT AmendmentHash, AmendmentName, Veto FROM FeatureVotes"; + // SOCI requires boost::optional (not std::optional) as parameters. + boost::optional amendment_hash; + boost::optional amendment_name; + boost::optional vote_to_veto; + soci::statement st = + (session.prepare << sql, + soci::into(amendment_hash), + soci::into(amendment_name), + soci::into(vote_to_veto)); + st.execute(); + while (st.fetch()) + { + callback(amendment_hash, amendment_name, vote_to_veto); + } +} + +void +voteAmendment( + soci::session& session, + uint256 const& amendment, + std::string const& name, + bool vote_to_veto) +{ + soci::transaction tr(session); + std::string sql = + "INSERT INTO FeatureVotes (AmendmentHash, AmendmentName, Veto) VALUES " + "('"; + sql += to_string(amendment); + sql += "', '" + name; + sql += "', '" + std::to_string(int{vote_to_veto}) + "');"; + session << sql; + tr.commit(); +} + +/* State DB */ + +void +initStateDB( + soci::session& session, + BasicConfig const& config, + std::string const& dbName) +{ + open(session, config, dbName); + + session << "PRAGMA synchronous=FULL;"; + + session << "CREATE TABLE IF NOT EXISTS DbState (" + " Key INTEGER PRIMARY KEY," + " WritableDb TEXT," + " ArchiveDb TEXT," + " LastRotatedLedger INTEGER" + ");"; + + session << "CREATE TABLE IF NOT EXISTS CanDelete (" + " Key INTEGER PRIMARY KEY," + " CanDeleteSeq INTEGER" + ");"; + + std::int64_t count = 0; + { + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional countO; + session << "SELECT COUNT(Key) FROM DbState WHERE Key = 1;", + soci::into(countO); + if (!countO) + Throw( + "Failed to fetch Key Count from DbState."); + count = *countO; + } + + if (!count) + { + session << "INSERT INTO DbState VALUES (1, '', '', 0);"; + } + + { + // SOCI requires boost::optional (not std::optional) as the parameter. 
+ boost::optional countO; + session << "SELECT COUNT(Key) FROM CanDelete WHERE Key = 1;", + soci::into(countO); + if (!countO) + Throw( + "Failed to fetch Key Count from CanDelete."); + count = *countO; + } + + if (!count) + { + session << "INSERT INTO CanDelete VALUES (1, 0);"; + } +} + +LedgerIndex +getCanDelete(soci::session& session) +{ + LedgerIndex seq; + session << "SELECT CanDeleteSeq FROM CanDelete WHERE Key = 1;", + soci::into(seq); + ; + return seq; +} + +LedgerIndex +setCanDelete(soci::session& session, LedgerIndex canDelete) +{ + session << "UPDATE CanDelete SET CanDeleteSeq = :canDelete WHERE Key = 1;", + soci::use(canDelete); + return canDelete; +} + +SavedState +getSavedState(soci::session& session) +{ + SavedState state; + session << "SELECT WritableDb, ArchiveDb, LastRotatedLedger" + " FROM DbState WHERE Key = 1;", + soci::into(state.writableDb), soci::into(state.archiveDb), + soci::into(state.lastRotated); + + return state; +} + +void +setSavedState(soci::session& session, SavedState const& state) +{ + session << "UPDATE DbState" + " SET WritableDb = :writableDb," + " ArchiveDb = :archiveDb," + " LastRotatedLedger = :lastRotated" + " WHERE Key = 1;", + soci::use(state.writableDb), soci::use(state.archiveDb), + soci::use(state.lastRotated); +} + +void +setLastRotated(soci::session& session, LedgerIndex seq) +{ + session << "UPDATE DbState SET LastRotatedLedger = :seq" + " WHERE Key = 1;", + soci::use(seq); +} + +/* DatabaseBody DB */ + +std::pair, std::optional> +openDatabaseBodyDb( + DatabaseCon::Setup const& setup, + boost::filesystem::path path) +{ + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional pathFromDb; + boost::optional size; + + auto conn = std::make_unique( + setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); + + auto& session = *conn->checkoutDb(); + + session << "SELECT Path FROM Download WHERE Part=0;", + soci::into(pathFromDb); + + // Try to reuse preexisting + // database. + if (pathFromDb) + { + // Can't resuse - database was + // from a different file download. + if (pathFromDb != path.string()) + { + session << "DROP TABLE Download;"; + } + + // Continuing a file download. + else + { + session << "SELECT SUM(LENGTH(Data)) FROM Download;", + soci::into(size); + } + } + + return {std::move(conn), (size ? *size : std::optional())}; +} + +std::uint64_t +databaseBodyDoPut( + soci::session& session, + std::string const& data, + std::string const& path, + std::uint64_t fileSize, + std::uint64_t part, + std::uint16_t maxRowSizePad) +{ + std::uint64_t rowSize = 0; + soci::indicator rti; + + std::uint64_t remainingInRow = 0; + + auto be = + dynamic_cast(session.get_backend()); + BOOST_ASSERT(be); + + // This limits how large we can make the blob + // in each row. Also subtract a pad value to + // account for the other values in the row. 
+ auto const blobMaxSize = + sqlite_api::sqlite3_limit(be->conn_, SQLITE_LIMIT_LENGTH, -1) - + maxRowSizePad; + + std::string newpath; + + auto rowInit = [&] { + session << "INSERT INTO Download VALUES (:path, zeroblob(0), 0, :part)", + soci::use(newpath), soci::use(part); + + remainingInRow = blobMaxSize; + rowSize = 0; + }; + + session << "SELECT Path,Size,Part FROM Download ORDER BY Part DESC " + "LIMIT 1", + soci::into(newpath), soci::into(rowSize), soci::into(part, rti); + + if (!session.got_data()) + { + newpath = path; + rowInit(); + } + else + remainingInRow = blobMaxSize - rowSize; + + auto insert = [&session, &rowSize, &part, &fs = fileSize]( + auto const& data) { + std::uint64_t updatedSize = rowSize + data.size(); + + session << "UPDATE Download SET Data = CAST(Data || :data AS blob), " + "Size = :size WHERE Part = :part;", + soci::use(data), soci::use(updatedSize), soci::use(part); + + fs += data.size(); + }; + + size_t currentBase = 0; + + while (currentBase + remainingInRow < data.size()) + { + if (remainingInRow) + { + insert(data.substr(currentBase, remainingInRow)); + currentBase += remainingInRow; + } + + ++part; + rowInit(); + } + + insert(data.substr(currentBase)); + + return part; +} + +void +databaseBodyFinish(soci::session& session, std::ofstream& fout) +{ + soci::rowset rs = + (session.prepare << "SELECT Data FROM Download ORDER BY PART ASC;"); + + // iteration through the resultset: + for (auto it = rs.begin(); it != rs.end(); ++it) + fout.write(it->data(), it->size()); +} + +/* Vacuum DB */ + +bool +doVacuumDB(DatabaseCon::Setup const& setup) +{ + boost::filesystem::path dbPath = setup.dataDir / TxDBName; + + uintmax_t const dbSize = file_size(dbPath); + assert(dbSize != static_cast(-1)); + + if (auto available = space(dbPath.parent_path()).available; + available < dbSize) + { + std::cerr << "The database filesystem must have at least as " + "much free space as the size of " + << dbPath.string() << ", which is " << dbSize + << " bytes. Only " << available << " bytes are available.\n"; + return false; + } + + auto txnDB = + std::make_unique(setup, TxDBName, TxDBPragma, TxDBInit); + auto& session = txnDB->getSession(); + std::uint32_t pageSize; + + // Only the most trivial databases will fit in memory on typical + // (recommended) software. Force temp files to be written to disk + // regardless of the config settings. + session << boost::format(CommonDBPragmaTemp) % "file"; + session << "PRAGMA page_size;", soci::into(pageSize); + + std::cout << "VACUUM beginning. page_size: " << pageSize << std::endl; + + session << "VACUUM;"; + assert(setup.globalPragma); + for (auto const& p : *setup.globalPragma) + session << p; + session << "PRAGMA page_size;", soci::into(pageSize); + + std::cout << "VACUUM finished. 
page_size: " << pageSize << std::endl; + + return true; +} + +/* PeerFinder DB */ + +void +initPeerFinderDB( + soci::session& session, + BasicConfig const& config, + beast::Journal j) +{ + DBConfig m_sociConfig(config, "peerfinder"); + m_sociConfig.open(session); + + JLOG(j.info()) << "Opening database at '" << m_sociConfig.connectionString() + << "'"; + + soci::transaction tr(session); + session << "PRAGMA encoding=\"UTF-8\";"; + + session << "CREATE TABLE IF NOT EXISTS SchemaVersion ( " + " name TEXT PRIMARY KEY, " + " version INTEGER" + ");"; + + session << "CREATE TABLE IF NOT EXISTS PeerFinder_BootstrapCache ( " + " id INTEGER PRIMARY KEY AUTOINCREMENT, " + " address TEXT UNIQUE NOT NULL, " + " valence INTEGER" + ");"; + + session << "CREATE INDEX IF NOT EXISTS " + " PeerFinder_BootstrapCache_Index ON " + "PeerFinder_BootstrapCache " + " ( " + " address " + " ); "; + + tr.commit(); +} + +void +updatePeerFinderDB( + soci::session& session, + int currentSchemaVersion, + beast::Journal j) +{ + soci::transaction tr(session); + // get version + int version(0); + { + // SOCI requires a boost::optional (not std::optional) parameter. + boost::optional vO; + session << "SELECT " + " version " + "FROM SchemaVersion WHERE " + " name = 'PeerFinder';", + soci::into(vO); + + version = vO.value_or(0); + + JLOG(j.info()) << "Opened version " << version << " database"; + } + + { + if (version < currentSchemaVersion) + { + JLOG(j.info()) << "Updating database to version " + << currentSchemaVersion; + } + else if (version > currentSchemaVersion) + { + Throw( + "The PeerFinder database version is higher than expected"); + } + } + + if (version < 4) + { + // + // Remove the "uptime" column from the bootstrap table + // + + session << "CREATE TABLE IF NOT EXISTS " + "PeerFinder_BootstrapCache_Next ( " + " id INTEGER PRIMARY KEY AUTOINCREMENT, " + " address TEXT UNIQUE NOT NULL, " + " valence INTEGER" + ");"; + + session << "CREATE INDEX IF NOT EXISTS " + " PeerFinder_BootstrapCache_Next_Index ON " + " PeerFinder_BootstrapCache_Next " + " ( address ); "; + + std::size_t count; + session << "SELECT COUNT(*) FROM PeerFinder_BootstrapCache;", + soci::into(count); + + std::vector list; + + { + list.reserve(count); + std::string s; + int valence; + soci::statement st = + (session.prepare << "SELECT " + " address, " + " valence " + "FROM PeerFinder_BootstrapCache;", + soci::into(s), + soci::into(valence)); + + st.execute(); + while (st.fetch()) + { + PeerFinder::Store::Entry entry; + entry.endpoint = beast::IP::Endpoint::from_string(s); + if (!is_unspecified(entry.endpoint)) + { + entry.valence = valence; + list.push_back(entry); + } + else + { + JLOG(j.error()) << "Bad address string '" << s + << "' in Bootcache table"; + } + } + } + + if (!list.empty()) + { + std::vector s; + std::vector valence; + s.reserve(list.size()); + valence.reserve(list.size()); + + for (auto iter(list.cbegin()); iter != list.cend(); ++iter) + { + s.emplace_back(to_string(iter->endpoint)); + valence.emplace_back(iter->valence); + } + + session << "INSERT INTO PeerFinder_BootstrapCache_Next ( " + " address, " + " valence " + ") VALUES ( " + " :s, :valence" + ");", + soci::use(s), soci::use(valence); + } + + session << "DROP TABLE IF EXISTS PeerFinder_BootstrapCache;"; + + session << "DROP INDEX IF EXISTS PeerFinder_BootstrapCache_Index;"; + + session << "ALTER TABLE PeerFinder_BootstrapCache_Next " + " RENAME TO PeerFinder_BootstrapCache;"; + + session << "CREATE INDEX IF NOT EXISTS " + " PeerFinder_BootstrapCache_Index ON " + 
"PeerFinder_BootstrapCache " + " ( " + " address " + " ); "; + } + + if (version < 3) + { + // + // Remove legacy endpoints from the schema + // + + session << "DROP TABLE IF EXISTS LegacyEndpoints;"; + + session << "DROP TABLE IF EXISTS PeerFinderLegacyEndpoints;"; + + session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints;"; + + session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints_Index;"; + } + + { + int const v(currentSchemaVersion); + session << "INSERT OR REPLACE INTO SchemaVersion (" + " name " + " ,version " + ") VALUES ( " + " 'PeerFinder', :version " + ");", + soci::use(v); + } + + tr.commit(); +} + +void +readPeerFinderDB( + soci::session& session, + std::function const& func) +{ + std::string s; + int valence; + soci::statement st = + (session.prepare << "SELECT " + " address, " + " valence " + "FROM PeerFinder_BootstrapCache;", + soci::into(s), + soci::into(valence)); + + st.execute(); + while (st.fetch()) + { + func(s, valence); + } +} + +void +savePeerFinderDB( + soci::session& session, + std::vector const& v) +{ + soci::transaction tr(session); + session << "DELETE FROM PeerFinder_BootstrapCache;"; + + if (!v.empty()) + { + std::vector s; + std::vector valence; + s.reserve(v.size()); + valence.reserve(v.size()); + + for (auto const& e : v) + { + s.emplace_back(to_string(e.endpoint)); + valence.emplace_back(e.valence); + } + + session << "INSERT INTO PeerFinder_BootstrapCache ( " + " address, " + " valence " + ") VALUES ( " + " :s, :valence " + ");", + soci::use(s), soci::use(valence); + } + + tr.commit(); +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp new file mode 100644 index 0000000000..394770f790 --- /dev/null +++ b/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp @@ -0,0 +1,1429 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +/** + * @brief to_string Returns name of table by table ID. + * @param type Table ID. + * @return Name of the table. 
+ */ +static std::string +to_string(TableType type) +{ + static_assert( + TableTypeCount == 3, + "Need to modify switch statement if enum is modified"); + switch (type) + { + case TableType::Ledgers: + return "Ledgers"; + case TableType::Transactions: + return "Transactions"; + case TableType::AccountTransactions: + return "AccountTransactions"; + default: + assert(0); + return "Unknown"; + } +} + +DatabasePairValid +makeLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup) +{ + // ledger database + auto lgr{std::make_unique( + setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup)}; + lgr->getSession() << boost::str( + boost::format("PRAGMA cache_size=-%d;") % + kilobytes(config.getValueFor(SizedItem::lgrDBCache))); + + if (config.useTxTables()) + { + // transaction database + auto tx{std::make_unique( + setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup)}; + tx->getSession() << boost::str( + boost::format("PRAGMA cache_size=-%d;") % + kilobytes(config.getValueFor(SizedItem::txnDBCache))); + + if (!setup.standAlone || setup.startUp == Config::LOAD || + setup.startUp == Config::LOAD_FILE || + setup.startUp == Config::REPLAY) + { + // Check if AccountTransactions has primary key + std::string cid, name, type; + std::size_t notnull, dflt_value, pk; + soci::indicator ind; + soci::statement st = + (tx->getSession().prepare + << ("PRAGMA table_info(AccountTransactions);"), + soci::into(cid), + soci::into(name), + soci::into(type), + soci::into(notnull), + soci::into(dflt_value, ind), + soci::into(pk)); + + st.execute(); + while (st.fetch()) + { + if (pk == 1) + { + return {std::move(lgr), std::move(tx), false}; + } + } + } + + return {std::move(lgr), std::move(tx), true}; + } + else + return {std::move(lgr), {}, true}; +} + +std::optional +getMinLedgerSeq(soci::session& session, TableType type) +{ + std::string query = "SELECT MIN(LedgerSeq) FROM " + to_string(type) + ";"; + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional m; + session << query, soci::into(m); + return m ? *m : std::optional(); +} + +std::optional +getMaxLedgerSeq(soci::session& session, TableType type) +{ + std::string query = "SELECT MAX(LedgerSeq) FROM " + to_string(type) + ";"; + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional m; + session << query, soci::into(m); + return m ? 
*m : std::optional(); +} + +void +deleteByLedgerSeq(soci::session& session, TableType type, LedgerIndex ledgerSeq) +{ + session << "DELETE FROM " << to_string(type) + << " WHERE LedgerSeq == " << ledgerSeq << ";"; +} + +void +deleteBeforeLedgerSeq( + soci::session& session, + TableType type, + LedgerIndex ledgerSeq) +{ + session << "DELETE FROM " << to_string(type) << " WHERE LedgerSeq < " + << ledgerSeq << ";"; +} + +std::size_t +getRows(soci::session& session, TableType type) +{ + std::size_t rows; + session << "SELECT COUNT(*) AS rows " + "FROM " + << to_string(type) << ";", + soci::into(rows); + + return rows; +} + +RelationalDBInterface::CountMinMax +getRowsMinMax(soci::session& session, TableType type) +{ + RelationalDBInterface::CountMinMax res; + session << "SELECT COUNT(*) AS rows, " + "MIN(LedgerSeq) AS first, " + "MAX(LedgerSeq) AS last " + "FROM " + << to_string(type) << ";", + soci::into(res.numberOfRows), soci::into(res.minLedgerSequence), + soci::into(res.maxLedgerSequence); + + return res; +} + +bool +saveValidatedLedger( + DatabaseCon& ldgDB, + DatabaseCon& txnDB, + Application& app, + std::shared_ptr const& ledger, + bool current) +{ + auto j = app.journal("Ledger"); + auto seq = ledger->info().seq; + + // TODO(tom): Fix this hard-coded SQL! + JLOG(j.trace()) << "saveValidatedLedger " << (current ? "" : "fromAcquire ") + << seq; + + if (!ledger->info().accountHash.isNonZero()) + { + JLOG(j.fatal()) << "AH is zero: " << getJson({*ledger, {}}); + assert(false); + } + + if (ledger->info().accountHash != ledger->stateMap().getHash().as_uint256()) + { + JLOG(j.fatal()) << "sAL: " << ledger->info().accountHash + << " != " << ledger->stateMap().getHash(); + JLOG(j.fatal()) << "saveAcceptedLedger: seq=" << seq + << ", current=" << current; + assert(false); + } + + assert(ledger->info().txHash == ledger->txMap().getHash().as_uint256()); + + // Save the ledger header in the hashed object store + { + Serializer s(128); + s.add32(HashPrefix::ledgerMaster); + addRaw(ledger->info(), s); + app.getNodeStore().store( + hotLEDGER, std::move(s.modData()), ledger->info().hash, seq); + } + + AcceptedLedger::pointer aLedger; + try + { + aLedger = app.getAcceptedLedgerCache().fetch(ledger->info().hash); + if (!aLedger) + { + aLedger = std::make_shared(ledger, app); + app.getAcceptedLedgerCache().canonicalize_replace_client( + ledger->info().hash, aLedger); + } + } + catch (std::exception const&) + { + JLOG(j.warn()) << "An accepted ledger was missing nodes"; + app.getLedgerMaster().failedSave(seq, ledger->info().hash); + // Clients can now trust the database for information about this + // ledger sequence. 
+ app.pendingSaves().finishWork(seq); + return false; + } + + { + static boost::format deleteLedger( + "DELETE FROM Ledgers WHERE LedgerSeq = %u;"); + static boost::format deleteTrans1( + "DELETE FROM Transactions WHERE LedgerSeq = %u;"); + static boost::format deleteTrans2( + "DELETE FROM AccountTransactions WHERE LedgerSeq = %u;"); + static boost::format deleteAcctTrans( + "DELETE FROM AccountTransactions WHERE TransID = '%s';"); + + { + auto db = ldgDB.checkoutDb(); + *db << boost::str(deleteLedger % seq); + } + + if (app.config().useTxTables()) + { + auto db = txnDB.checkoutDb(); + + soci::transaction tr(*db); + + *db << boost::str(deleteTrans1 % seq); + *db << boost::str(deleteTrans2 % seq); + + std::string const ledgerSeq(std::to_string(seq)); + + for (auto const& [_, acceptedLedgerTx] : aLedger->getMap()) + { + (void)_; + uint256 transactionID = acceptedLedgerTx->getTransactionID(); + + std::string const txnId(to_string(transactionID)); + std::string const txnSeq( + std::to_string(acceptedLedgerTx->getTxnSeq())); + + *db << boost::str(deleteAcctTrans % transactionID); + + auto const& accts = acceptedLedgerTx->getAffected(); + + if (!accts.empty()) + { + std::string sql( + "INSERT INTO AccountTransactions " + "(TransID, Account, LedgerSeq, TxnSeq) VALUES "); + + // Try to make an educated guess on how much space we'll + // need for our arguments. In argument order we have: 64 + // + 34 + 10 + 10 = 118 + 10 extra = 128 bytes + sql.reserve(sql.length() + (accts.size() * 128)); + + bool first = true; + for (auto const& account : accts) + { + if (!first) + sql += ", ('"; + else + { + sql += "('"; + first = false; + } + + sql += txnId; + sql += "','"; + sql += app.accountIDCache().toBase58(account); + sql += "',"; + sql += ledgerSeq; + sql += ","; + sql += txnSeq; + sql += ")"; + } + sql += ";"; + JLOG(j.trace()) << "ActTx: " << sql; + *db << sql; + } + else + { + JLOG(j.warn()) << "Transaction in ledger " << seq + << " affects no accounts"; + JLOG(j.warn()) << acceptedLedgerTx->getTxn()->getJson( + JsonOptions::none); + } + + *db + << (STTx::getMetaSQLInsertReplaceHeader() + + acceptedLedgerTx->getTxn()->getMetaSQL( + seq, acceptedLedgerTx->getEscMeta()) + + ";"); + + app.getMasterTransaction().inLedger(transactionID, seq); + } + + tr.commit(); + } + + { + static std::string addLedger( + R"sql(INSERT OR REPLACE INTO Ledgers + (LedgerHash,LedgerSeq,PrevHash,TotalCoins,ClosingTime,PrevClosingTime, + CloseTimeRes,CloseFlags,AccountSetHash,TransSetHash) + VALUES + (:ledgerHash,:ledgerSeq,:prevHash,:totalCoins,:closingTime,:prevClosingTime, + :closeTimeRes,:closeFlags,:accountSetHash,:transSetHash);)sql"); + + auto db(ldgDB.checkoutDb()); + + soci::transaction tr(*db); + + auto const hash = to_string(ledger->info().hash); + auto const parentHash = to_string(ledger->info().parentHash); + auto const drops = to_string(ledger->info().drops); + auto const closeTime = + ledger->info().closeTime.time_since_epoch().count(); + auto const parentCloseTime = + ledger->info().parentCloseTime.time_since_epoch().count(); + auto const closeTimeResolution = + ledger->info().closeTimeResolution.count(); + auto const closeFlags = ledger->info().closeFlags; + auto const accountHash = to_string(ledger->info().accountHash); + auto const txHash = to_string(ledger->info().txHash); + + *db << addLedger, soci::use(hash), soci::use(seq), + soci::use(parentHash), soci::use(drops), soci::use(closeTime), + soci::use(parentCloseTime), soci::use(closeTimeResolution), + soci::use(closeFlags), soci::use(accountHash), + 
soci::use(txHash); + + tr.commit(); + } + } + + return true; +} + +/** + * @brief getLedgerInfo Returns info of ledger with special condition + * given as SQL query. + * @param session Session with database. + * @param sqlSuffix Special condition for found the ledger. + * @param j Journal. + * @return Ledger info or none if ledger not found. + */ +static std::optional +getLedgerInfo( + soci::session& session, + std::string const& sqlSuffix, + beast::Journal j) +{ + // SOCI requires boost::optional (not std::optional) as parameters. + boost::optional hash, parentHash, accountHash, txHash; + boost::optional seq, drops, closeTime, parentCloseTime, + closeTimeResolution, closeFlags; + + std::string const sql = + "SELECT " + "LedgerHash, PrevHash, AccountSetHash, TransSetHash, " + "TotalCoins," + "ClosingTime, PrevClosingTime, CloseTimeRes, CloseFlags," + "LedgerSeq FROM Ledgers " + + sqlSuffix + ";"; + + session << sql, soci::into(hash), soci::into(parentHash), + soci::into(accountHash), soci::into(txHash), soci::into(drops), + soci::into(closeTime), soci::into(parentCloseTime), + soci::into(closeTimeResolution), soci::into(closeFlags), + soci::into(seq); + + if (!session.got_data()) + { + JLOG(j.debug()) << "Ledger not found: " << sqlSuffix; + return {}; + } + + using time_point = NetClock::time_point; + using duration = NetClock::duration; + + LedgerInfo info; + + if (hash && !info.hash.parseHex(*hash)) + { + JLOG(j.debug()) << "Hash parse error for ledger: " << sqlSuffix; + return {}; + } + + if (parentHash && !info.parentHash.parseHex(*parentHash)) + { + JLOG(j.debug()) << "parentHash parse error for ledger: " << sqlSuffix; + return {}; + } + + if (accountHash && !info.accountHash.parseHex(*accountHash)) + { + JLOG(j.debug()) << "accountHash parse error for ledger: " << sqlSuffix; + return {}; + } + + if (txHash && !info.txHash.parseHex(*txHash)) + { + JLOG(j.debug()) << "txHash parse error for ledger: " << sqlSuffix; + return {}; + } + + info.seq = rangeCheckedCast(seq.value_or(0)); + info.drops = drops.value_or(0); + info.closeTime = time_point{duration{closeTime.value_or(0)}}; + info.parentCloseTime = time_point{duration{parentCloseTime.value_or(0)}}; + info.closeFlags = closeFlags.value_or(0); + info.closeTimeResolution = duration{closeTimeResolution.value_or(0)}; + + return info; +} + +std::optional +getLedgerInfoByIndex( + soci::session& session, + LedgerIndex ledgerSeq, + beast::Journal j) +{ + std::ostringstream s; + s << "WHERE LedgerSeq = " << ledgerSeq; + return getLedgerInfo(session, s.str(), j); +} + +std::optional +getNewestLedgerInfo(soci::session& session, beast::Journal j) +{ + std::ostringstream s; + s << "ORDER BY LedgerSeq DESC LIMIT 1"; + return getLedgerInfo(session, s.str(), j); +} + +std::optional +getLimitedOldestLedgerInfo( + soci::session& session, + LedgerIndex ledgerFirstIndex, + beast::Journal j) +{ + std::ostringstream s; + s << "WHERE LedgerSeq >= " + std::to_string(ledgerFirstIndex) + + " ORDER BY LedgerSeq ASC LIMIT 1"; + return getLedgerInfo(session, s.str(), j); +} + +std::optional +getLimitedNewestLedgerInfo( + soci::session& session, + LedgerIndex ledgerFirstIndex, + beast::Journal j) +{ + std::ostringstream s; + s << "WHERE LedgerSeq >= " + std::to_string(ledgerFirstIndex) + + " ORDER BY LedgerSeq DESC LIMIT 1"; + return getLedgerInfo(session, s.str(), j); +} + +std::optional +getLedgerInfoByHash( + soci::session& session, + uint256 const& ledgerHash, + beast::Journal j) +{ + std::ostringstream s; + s << "WHERE LedgerHash = '" << ledgerHash << "'"; + 
return getLedgerInfo(session, s.str(), j); +} + +uint256 +getHashByIndex(soci::session& session, LedgerIndex ledgerIndex) +{ + uint256 ret; + + std::string sql = + "SELECT LedgerHash FROM Ledgers INDEXED BY SeqLedger WHERE LedgerSeq='"; + sql.append(beast::lexicalCastThrow(ledgerIndex)); + sql.append("';"); + + std::string hash; + { + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional lh; + session << sql, soci::into(lh); + + if (!session.got_data() || !lh) + return ret; + + hash = *lh; + if (hash.empty()) + return ret; + } + + if (!ret.parseHex(hash)) + return ret; + + return ret; +} + +std::optional +getHashesByIndex( + soci::session& session, + LedgerIndex ledgerIndex, + beast::Journal j) +{ + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional lhO, phO; + + session << "SELECT LedgerHash,PrevHash FROM Ledgers " + "INDEXED BY SeqLedger WHERE LedgerSeq = :ls;", + soci::into(lhO), soci::into(phO), soci::use(ledgerIndex); + + if (!lhO || !phO) + { + auto stream = j.trace(); + JLOG(stream) << "Don't have ledger " << ledgerIndex; + return {}; + } + + LedgerHashPair hashes; + if (!hashes.ledgerHash.parseHex(*lhO) || !hashes.parentHash.parseHex(*phO)) + { + auto stream = j.trace(); + JLOG(stream) << "Error parse hashes for ledger " << ledgerIndex; + return {}; + } + + return hashes; +} + +std::map +getHashesByIndex( + soci::session& session, + LedgerIndex minSeq, + LedgerIndex maxSeq, + beast::Journal j) +{ + std::string sql = + "SELECT LedgerSeq,LedgerHash,PrevHash FROM Ledgers WHERE LedgerSeq >= "; + sql.append(beast::lexicalCastThrow(minSeq)); + sql.append(" AND LedgerSeq <= "); + sql.append(beast::lexicalCastThrow(maxSeq)); + sql.append(";"); + + std::uint64_t ls; + std::string lh; + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional ph; + soci::statement st = + (session.prepare << sql, + soci::into(ls), + soci::into(lh), + soci::into(ph)); + + st.execute(); + std::map res; + while (st.fetch()) + { + LedgerHashPair& hashes = res[rangeCheckedCast(ls)]; + if (!hashes.ledgerHash.parseHex(lh)) + { + JLOG(j.warn()) << "Error parsed hash for ledger seq: " << ls; + } + if (!ph) + { + JLOG(j.warn()) << "Null prev hash for ledger seq: " << ls; + } + else if (!hashes.parentHash.parseHex(*ph)) + { + JLOG(j.warn()) << "Error parsed prev hash for ledger seq: " << ls; + } + } + return res; +} + +std::pair>, int> +getTxHistory( + soci::session& session, + Application& app, + LedgerIndex startIndex, + int quantity, + bool count) +{ + std::string sql = boost::str( + boost::format( + "SELECT LedgerSeq, Status, RawTxn " + "FROM Transactions ORDER BY LedgerSeq DESC LIMIT %u,%u;") % + startIndex % quantity); + + std::vector> txs; + int total = 0; + + { + // SOCI requires boost::optional (not std::optional) as parameters. 
+ boost::optional ledgerSeq; + boost::optional status; + soci::blob sociRawTxnBlob(session); + soci::indicator rti; + Blob rawTxn; + + soci::statement st = + (session.prepare << sql, + soci::into(ledgerSeq), + soci::into(status), + soci::into(sociRawTxnBlob, rti)); + + st.execute(); + while (st.fetch()) + { + if (soci::i_ok == rti) + convert(sociRawTxnBlob, rawTxn); + else + rawTxn.clear(); + + if (auto trans = Transaction::transactionFromSQL( + ledgerSeq, status, rawTxn, app)) + { + total++; + txs.push_back(trans); + } + } + + if (!total && count) + { + session << "SELECT COUNT(*) FROM Transactions;", soci::into(total); + + total = -total; + } + } + + return {txs, total}; +} + +/** + * @brief transactionsSQL Returns SQL query to select oldest or newest + * transactions in decoded or binary form for given account which + * match given criteria starting from given offset. + * @param app Application object. + * @param selection List of table fields to select from database. + * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number of transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param descending True for descending order, false for ascending. + * @param binary True for binary form, false for decoded. + * @param count True for count number of transaction, false for select it. + * @param j Journal. + * @return SQL query string. + */ +static std::string +transactionsSQL( + Application& app, + std::string selection, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + bool descending, + bool binary, + bool count, + beast::Journal j) +{ + constexpr std::uint32_t NONBINARY_PAGE_LENGTH = 200; + constexpr std::uint32_t BINARY_PAGE_LENGTH = 500; + + std::uint32_t numberOfResults; + + if (count) + { + numberOfResults = std::numeric_limits::max(); + } + else if (options.limit == UINT32_MAX) + { + numberOfResults = binary ? BINARY_PAGE_LENGTH : NONBINARY_PAGE_LENGTH; + } + else if (!options.bUnlimited) + { + numberOfResults = std::min( + binary ? 
BINARY_PAGE_LENGTH : NONBINARY_PAGE_LENGTH, options.limit); + } + else + { + numberOfResults = options.limit; + } + + if (limit_used) + { + if (numberOfResults <= *limit_used) + return ""; + else + numberOfResults -= *limit_used; + } + + std::string maxClause = ""; + std::string minClause = ""; + + if (options.maxLedger) + { + maxClause = boost::str( + boost::format("AND AccountTransactions.LedgerSeq <= '%u'") % + options.maxLedger); + } + + if (options.minLedger) + { + minClause = boost::str( + boost::format("AND AccountTransactions.LedgerSeq >= '%u'") % + options.minLedger); + } + + std::string sql; + + if (count) + sql = boost::str( + boost::format("SELECT %s FROM AccountTransactions " + "WHERE Account = '%s' %s %s LIMIT %u, %u;") % + selection % app.accountIDCache().toBase58(options.account) % + maxClause % minClause % + beast::lexicalCastThrow(options.offset) % + beast::lexicalCastThrow(numberOfResults)); + else + sql = boost::str( + boost::format( + "SELECT %s FROM " + "AccountTransactions INNER JOIN Transactions " + "ON Transactions.TransID = AccountTransactions.TransID " + "WHERE Account = '%s' %s %s " + "ORDER BY AccountTransactions.LedgerSeq %s, " + "AccountTransactions.TxnSeq %s, AccountTransactions.TransID %s " + "LIMIT %u, %u;") % + selection % app.accountIDCache().toBase58(options.account) % + maxClause % minClause % (descending ? "DESC" : "ASC") % + (descending ? "DESC" : "ASC") % (descending ? "DESC" : "ASC") % + beast::lexicalCastThrow(options.offset) % + beast::lexicalCastThrow(numberOfResults)); + JLOG(j.trace()) << "txSQL query: " << sql; + return sql; +} + +/** + * @brief getAccountTxs Returns oldest or newest transactions for given + * account which match given criteria starting from given offset. + * @param session Session with database. + * @param app Application object. + * @param ledgerMaster LedgerMaster object. + * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number of transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param descending True for descending order, false for ascending. + * @param j Journal. + * @return Vector of pairs of found transactions and its metadata + * sorted in given order by account sequence. + * Also number of transactions processed. + */ +static std::pair +getAccountTxs( + soci::session& session, + Application& app, + LedgerMaster& ledgerMaster, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + bool descending, + beast::Journal j) +{ + RelationalDBInterface::AccountTxs ret; + + std::string sql = transactionsSQL( + app, + "AccountTransactions.LedgerSeq,Status,RawTxn,TxnMeta", + options, + limit_used, + descending, + false, + false, + j); + if (sql == "") + return {ret, 0}; + + int total = 0; + { + // SOCI requires boost::optional (not std::optional) as parameters. 
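// Roughly how the getOldestAccountTxs/getNewestAccountTxs wrappers defined
// below are driven (illustrative only; AccountTxOptions is declared in
// RelationalDBInterface.h and is assumed here to be default-constructible,
// and accountID, session, ledgerMaster, app and j stand for the obvious
// caller-side objects):
//
//   RelationalDBInterface::AccountTxOptions options;
//   options.account = accountID;     // account whose history is wanted
//   options.minLedger = 1000;        // inclusive ledger range to search
//   options.maxLedger = 2000;
//   options.offset = 0;              // skip no leading rows
//   options.limit = 50;              // cap the number of results
//   options.bUnlimited = false;      // keep the page-length caps in force
//
//   auto [txs, processed] = getNewestAccountTxs(
//       session, app, ledgerMaster, options, /*limit_used*/ std::nullopt, j);
//   // txs holds {Transaction, TxMeta} pairs, newest ledger first.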
+ boost::optional ledgerSeq; + boost::optional status; + soci::blob sociTxnBlob(session), sociTxnMetaBlob(session); + soci::indicator rti, tmi; + Blob rawTxn, txnMeta; + + soci::statement st = + (session.prepare << sql, + soci::into(ledgerSeq), + soci::into(status), + soci::into(sociTxnBlob, rti), + soci::into(sociTxnMetaBlob, tmi)); + + st.execute(); + while (st.fetch()) + { + if (soci::i_ok == rti) + convert(sociTxnBlob, rawTxn); + else + rawTxn.clear(); + + if (soci::i_ok == tmi) + convert(sociTxnMetaBlob, txnMeta); + else + txnMeta.clear(); + + auto txn = + Transaction::transactionFromSQL(ledgerSeq, status, rawTxn, app); + + if (txnMeta.empty()) + { // Work around a bug that could leave the metadata missing + auto const seq = + rangeCheckedCast(ledgerSeq.value_or(0)); + + JLOG(j.warn()) + << "Recovering ledger " << seq << ", txn " << txn->getID(); + + if (auto l = ledgerMaster.getLedgerBySeq(seq)) + pendSaveValidated(app, l, false, false); + } + + if (txn) + { + ret.emplace_back( + txn, + std::make_shared( + txn->getID(), txn->getLedger(), txnMeta)); + total++; + } + } + + if (!total && limit_used) + { + RelationalDBInterface::AccountTxOptions opt = options; + opt.offset = 0; + std::string sql1 = transactionsSQL( + app, "COUNT(*)", opt, limit_used, descending, false, false, j); + + session << sql1, soci::into(total); + + total = ~total; + } + } + + return {ret, total}; +} + +std::pair +getOldestAccountTxs( + soci::session& session, + Application& app, + LedgerMaster& ledgerMaster, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j) +{ + return getAccountTxs( + session, app, ledgerMaster, options, limit_used, false, j); +} + +std::pair +getNewestAccountTxs( + soci::session& session, + Application& app, + LedgerMaster& ledgerMaster, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j) +{ + return getAccountTxs( + session, app, ledgerMaster, options, limit_used, true, j); +} + +/** + * @brief getAccountTxsB Returns oldest or newset transactions in binary + * form for given account which match given criteria starting from + * given offset. + * @param session Session with database. + * @param app Application object. + * @param options Struct AccountTxOptions which contain criteria to match: + * the account, minimum and maximum ledger numbers to search, + * offset of first entry to return, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases, if shard databases are used. + * None if node database is used. + * @param descending True for descending order, false for ascending. + * @param j Journal. + * @return Vector of tuples of found transactions, its metadata and + * account sequences sorted in given order by account + * sequence. Also number of transactions processed. + */ +static std::pair, int> +getAccountTxsB( + soci::session& session, + Application& app, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + bool descending, + beast::Journal j) +{ + std::vector ret; + + std::string sql = transactionsSQL( + app, + "AccountTransactions.LedgerSeq,Status,RawTxn,TxnMeta", + options, + limit_used, + descending, + true /*binary*/, + false, + j); + if (sql == "") + return {ret, 0}; + + int total = 0; + + { + // SOCI requires boost::optional (not std::optional) as parameters. 
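// A sketch of the binary flavour, via the getOldestAccountTxsB /
// getNewestAccountTxsB wrappers below (illustrative only; reuses the same
// `options` shape as the non-binary helpers, and each result element is
// expected to unpack as raw transaction, raw metadata, ledger sequence):
//
//   auto [entries, processed] = getNewestAccountTxsB(
//       session, app, options, /*limit_used*/ std::nullopt, j);
//   for (auto const& [rawTxn, rawMeta, ledgerSeq] : entries)
//       JLOG(j.trace()) << "ledger " << ledgerSeq << ": "
//                       << rawTxn.size() << " txn bytes, "
//                       << rawMeta.size() << " meta bytes";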
+ boost::optional ledgerSeq; + boost::optional status; + soci::blob sociTxnBlob(session), sociTxnMetaBlob(session); + soci::indicator rti, tmi; + + soci::statement st = + (session.prepare << sql, + soci::into(ledgerSeq), + soci::into(status), + soci::into(sociTxnBlob, rti), + soci::into(sociTxnMetaBlob, tmi)); + + st.execute(); + while (st.fetch()) + { + Blob rawTxn; + if (soci::i_ok == rti) + convert(sociTxnBlob, rawTxn); + Blob txnMeta; + if (soci::i_ok == tmi) + convert(sociTxnMetaBlob, txnMeta); + + auto const seq = + rangeCheckedCast(ledgerSeq.value_or(0)); + + ret.emplace_back(std::move(rawTxn), std::move(txnMeta), seq); + total++; + } + + if (!total && limit_used) + { + RelationalDBInterface::AccountTxOptions opt = options; + opt.offset = 0; + std::string sql1 = transactionsSQL( + app, "COUNT(*)", opt, limit_used, descending, true, false, j); + + session << sql1, soci::into(total); + + total = ~total; + } + } + + return {ret, total}; +} + +std::pair, int> +getOldestAccountTxsB( + soci::session& session, + Application& app, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j) +{ + return getAccountTxsB(session, app, options, limit_used, false, j); +} + +std::pair, int> +getNewestAccountTxsB( + soci::session& session, + Application& app, + RelationalDBInterface::AccountTxOptions const& options, + std::optional const& limit_used, + beast::Journal j) +{ + return getAccountTxsB(session, app, options, limit_used, true, j); +} + +/** + * @brief accountTxPage Searches oldest or newest transactions for given + * account which match given criteria starting from given marker + * and calls callback for each found transaction. + * @param session Session with database. + * @param idCache Account ID cache. + * @param onUnsavedLedger Callback function to call on each found unsaved + * ledger within given range. + * @param onTransaction Callback function to call on eahc found transaction. + * @param options Struct AccountTxPageOptions which contain criteria to + * match: the account, minimum and maximum ledger numbers to search, + * marker of first returned entry, number of transactions to return, + * flag if this number unlimited. + * @param limit_used Number or transactions already returned in calls + * to another shard databases. + * @param page_length Total number of transactions to return. + * @param forward True for ascending order, false for descending. + * @return Vector of tuples of found transactions, its metadata and + * account sequences sorted in given order by account + * sequence and marker for next search if search not finished. + * Also number of transactions processed during this call. 
+ */ +static std::pair, int> +accountTxPage( + soci::session& session, + AccountIDCache const& idCache, + std::function const& onUnsavedLedger, + std::function< + void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& + onTransaction, + RelationalDBInterface::AccountTxPageOptions const& options, + int limit_used, + std::uint32_t page_length, + bool forward) +{ + int total = 0; + + bool lookingForMarker = options.marker.has_value(); + + std::uint32_t numberOfResults; + + if (options.limit == 0 || options.limit == UINT32_MAX || + (options.limit > page_length && !options.bAdmin)) + numberOfResults = page_length; + else + numberOfResults = options.limit; + + if (numberOfResults < limit_used) + return {options.marker, -1}; + numberOfResults -= limit_used; + + // As an account can have many thousands of transactions, there is a limit + // placed on the amount of transactions returned. If the limit is reached + // before the result set has been exhausted (we always query for one more + // than the limit), then we return an opaque marker that can be supplied in + // a subsequent query. + std::uint32_t queryLimit = numberOfResults + 1; + std::uint32_t findLedger = 0, findSeq = 0; + + if (lookingForMarker) + { + findLedger = options.marker->ledgerSeq; + findSeq = options.marker->txnSeq; + } + + std::optional newmarker; + if (limit_used > 0) + newmarker = options.marker; + + static std::string const prefix( + R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, + Status,RawTxn,TxnMeta + FROM AccountTransactions INNER JOIN Transactions + ON Transactions.TransID = AccountTransactions.TransID + AND AccountTransactions.Account = '%s' WHERE + )"); + + std::string sql; + + // SQL's BETWEEN uses a closed interval ([a,b]) + + const char* const order = forward ? "ASC" : "DESC"; + + if (findLedger == 0) + { + sql = boost::str( + boost::format( + prefix + (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u' + ORDER BY AccountTransactions.LedgerSeq %s, + AccountTransactions.TxnSeq %s + LIMIT %u;)")) % + idCache.toBase58(options.account) % options.minLedger % + options.maxLedger % order % order % queryLimit); + } + else + { + const char* const compare = forward ? ">=" : "<="; + const std::uint32_t minLedger = + forward ? findLedger + 1 : options.minLedger; + const std::uint32_t maxLedger = + forward ? options.maxLedger : findLedger - 1; + + auto b58acct = idCache.toBase58(options.account); + sql = boost::str( + boost::format(( + R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, + Status,RawTxn,TxnMeta + FROM AccountTransactions, Transactions WHERE + (AccountTransactions.TransID = Transactions.TransID AND + AccountTransactions.Account = '%s' AND + AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u') + OR + (AccountTransactions.TransID = Transactions.TransID AND + AccountTransactions.Account = '%s' AND + AccountTransactions.LedgerSeq = '%u' AND + AccountTransactions.TxnSeq %s '%u') + ORDER BY AccountTransactions.LedgerSeq %s, + AccountTransactions.TxnSeq %s + LIMIT %u; + )")) % + b58acct % minLedger % maxLedger % b58acct % findLedger % compare % + findSeq % order % order % queryLimit); + } + + { + Blob rawData; + Blob rawMeta; + + // SOCI requires boost::optional (not std::optional) as parameters. 
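// How the oldestAccountTxPage/newestAccountTxPage wrappers below are
// typically driven (illustrative only; `pageOptions` is an
// AccountTxPageOptions filled in by the caller, and the callbacks are
// whatever the RPC layer wants to do per row):
//
//   auto onUnsaved = [&](std::uint32_t seq) {
//       // metadata was missing for this ledger; schedule it to be re-saved
//   };
//   auto onTxn = [&](std::uint32_t seq, std::string const& status,
//                    Blob&& rawTxn, Blob&& rawMeta) {
//       // consume one (transaction, metadata) row from ledger `seq`
//   };
//
//   auto [marker, count] = oldestAccountTxPage(
//       session, app.accountIDCache(), onUnsaved, onTxn, pageOptions,
//       /*limit_used*/ 0, /*page_length*/ 200);
//   if (marker)
//       // feed *marker back in as pageOptions.marker to fetch the next page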
+ boost::optional ledgerSeq; + boost::optional txnSeq; + boost::optional status; + soci::blob txnData(session); + soci::blob txnMeta(session); + soci::indicator dataPresent, metaPresent; + + soci::statement st = + (session.prepare << sql, + soci::into(ledgerSeq), + soci::into(txnSeq), + soci::into(status), + soci::into(txnData, dataPresent), + soci::into(txnMeta, metaPresent)); + + st.execute(); + + while (st.fetch()) + { + if (lookingForMarker) + { + if (findLedger == ledgerSeq.value_or(0) && + findSeq == txnSeq.value_or(0)) + { + lookingForMarker = false; + } + else + continue; + } + else if (numberOfResults == 0) + { + newmarker = { + rangeCheckedCast(ledgerSeq.value_or(0)), + txnSeq.value_or(0)}; + break; + } + + if (dataPresent == soci::i_ok) + convert(txnData, rawData); + else + rawData.clear(); + + if (metaPresent == soci::i_ok) + convert(txnMeta, rawMeta); + else + rawMeta.clear(); + + // Work around a bug that could leave the metadata missing + if (rawMeta.size() == 0) + onUnsavedLedger(ledgerSeq.value_or(0)); + + // `rawData` and `rawMeta` will be used after they are moved. + // That's OK. + onTransaction( + rangeCheckedCast(ledgerSeq.value_or(0)), + *status, + std::move(rawData), + std::move(rawMeta)); + // Note some callbacks will move the data, some will not. Clear + // them so code doesn't depend on if the data was actually moved + // or not. The code will be more efficient if `rawData` and + // `rawMeta` don't have to allocate in `convert`, so don't + // refactor my moving these variables into loop scope. + rawData.clear(); + rawMeta.clear(); + + --numberOfResults; + total++; + } + } + + return {newmarker, total}; +} + +std::pair, int> +oldestAccountTxPage( + soci::session& session, + AccountIDCache const& idCache, + std::function const& onUnsavedLedger, + std::function< + void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& + onTransaction, + RelationalDBInterface::AccountTxPageOptions const& options, + int limit_used, + std::uint32_t page_length) +{ + return accountTxPage( + session, + idCache, + onUnsavedLedger, + onTransaction, + options, + limit_used, + page_length, + true); +} + +std::pair, int> +newestAccountTxPage( + soci::session& session, + AccountIDCache const& idCache, + std::function const& onUnsavedLedger, + std::function< + void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& + onTransaction, + RelationalDBInterface::AccountTxPageOptions const& options, + int limit_used, + std::uint32_t page_length) +{ + return accountTxPage( + session, + idCache, + onUnsavedLedger, + onTransaction, + options, + limit_used, + page_length, + false); +} + +std::variant +getTransaction( + soci::session& session, + Application& app, + uint256 const& id, + std::optional> const& range, + error_code_i& ec) +{ + std::string sql = + "SELECT LedgerSeq,Status,RawTxn,TxnMeta " + "FROM Transactions WHERE TransID='"; + + sql.append(to_string(id)); + sql.append("';"); + + // SOCI requires boost::optional (not std::optional) as parameters. 
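// A sketch of looking up a single transaction by hash (illustrative only;
// `txID` is the transaction hash and `ledgerRange` the optional closed
// ledger interval the caller has already searched):
//
//   error_code_i ec = rpcSUCCESS;
//   auto res = getTransaction(session, app, txID, ledgerRange, ec);
//   if (std::holds_alternative<TxSearched>(res))
//   {
//       // TxSearched::all / ::some / ::unknown reports how completely the
//       // requested range was covered without finding the transaction.
//   }
//   else
//   {
//       // The other alternative carries the Transaction and its TxMeta.
//   }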
+ boost::optional ledgerSeq; + boost::optional status; + Blob rawTxn, rawMeta; + { + soci::blob sociRawTxnBlob(session), sociRawMetaBlob(session); + soci::indicator txn, meta; + + session << sql, soci::into(ledgerSeq), soci::into(status), + soci::into(sociRawTxnBlob, txn), soci::into(sociRawMetaBlob, meta); + + auto const got_data = session.got_data(); + + if ((!got_data || txn != soci::i_ok || meta != soci::i_ok) && !range) + return TxSearched::unknown; + + if (!got_data) + { + uint64_t count = 0; + soci::indicator rti; + + session + << "SELECT COUNT(DISTINCT LedgerSeq) FROM Transactions WHERE " + "LedgerSeq BETWEEN " + << range->first() << " AND " << range->last() << ";", + soci::into(count, rti); + + if (!session.got_data() || rti != soci::i_ok) + return TxSearched::some; + + return count == (range->last() - range->first() + 1) + ? TxSearched::all + : TxSearched::some; + } + + convert(sociRawTxnBlob, rawTxn); + convert(sociRawMetaBlob, rawMeta); + } + + try + { + auto txn = + Transaction::transactionFromSQL(ledgerSeq, status, rawTxn, app); + + if (!ledgerSeq) + return std::pair{std::move(txn), nullptr}; + + std::uint32_t inLedger = + rangeCheckedCast(ledgerSeq.value()); + + auto txMeta = std::make_shared(id, inLedger, rawMeta); + + return std::pair{std::move(txn), std::move(txMeta)}; + } + catch (std::exception& e) + { + JLOG(app.journal("Ledger").warn()) + << "Unable to deserialize transaction from raw SQL value. Error: " + << e.what(); + + ec = rpcDB_DESERIALIZATION; + } + + return TxSearched::unknown; +} + +bool +dbHasSpace(soci::session& session, Config const& config, beast::Journal j) +{ + boost::filesystem::space_info space = + boost::filesystem::space(config.legacy("database_path")); + + if (space.available < megabytes(512)) + { + JLOG(j.fatal()) << "Remaining free disk space is less than 512MB"; + return false; + } + + if (config.useTxTables()) + { + DatabaseCon::Setup dbSetup = setup_DatabaseCon(config); + boost::filesystem::path dbPath = dbSetup.dataDir / TxDBName; + boost::system::error_code ec; + std::optional dbSize = + boost::filesystem::file_size(dbPath, ec); + if (ec) + { + JLOG(j.error()) + << "Error checking transaction db file size: " << ec.message(); + dbSize.reset(); + } + + static auto const pageSize = [&] { + std::uint32_t ps; + session << "PRAGMA page_size;", soci::into(ps); + return ps; + }(); + static auto const maxPages = [&] { + std::uint32_t mp; + session << "PRAGMA max_page_count;", soci::into(mp); + return mp; + }(); + std::uint32_t pageCount; + session << "PRAGMA page_count;", soci::into(pageCount); + std::uint32_t freePages = maxPages - pageCount; + std::uint64_t freeSpace = + safe_cast(freePages) * pageSize; + JLOG(j.info()) + << "Transaction DB pathname: " << dbPath.string() + << "; file size: " << dbSize.value_or(-1) << " bytes" + << "; SQLite page size: " << pageSize << " bytes" + << "; Free pages: " << freePages << "; Free space: " << freeSpace + << " bytes; " + << "Note that this does not take into account available disk " + "space."; + + if (freeSpace < megabytes(512)) + { + JLOG(j.fatal()) + << "Free SQLite space for transaction db is less than " + "512MB. To fix this, rippled must be executed with the " + "vacuum parameter before restarting. 
" + "Note that this activity can take multiple days, " + "depending on database size."; + return false; + } + } + + return true; +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp new file mode 100644 index 0000000000..d242c8b4bd --- /dev/null +++ b/src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp @@ -0,0 +1,942 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +namespace ripple { + +using TxnsData = RelationalDBInterface::AccountTxs; +using TxnsDataBinary = RelationalDBInterface::MetaTxsList; + +using LedgerHash = RelationalDBInterface::LedgerHash; +using LedgerSequence = RelationalDBInterface::LedgerSequence; +using LedgerShortcut = RelationalDBInterface::LedgerShortcut; + +std::optional +getMinLedgerSeq(std::shared_ptr const& pgPool, beast::Journal j) +{ +#ifdef RIPPLED_REPORTING + auto seq = PgQuery(pgPool)("SELECT min_ledger()"); + if (!seq) + { + JLOG(j.error()) << "Error querying minimum ledger sequence."; + } + else if (!seq.isNull()) + return seq.asInt(); +#endif + return {}; +} + +std::optional +getMaxLedgerSeq(std::shared_ptr const& pgPool) +{ +#ifdef RIPPLED_REPORTING + auto seq = PgQuery(pgPool)("SELECT max_ledger()"); + if (seq && !seq.isNull()) + return seq.asBigInt(); +#endif + return {}; +} + +std::string +getCompleteLedgers(std::shared_ptr const& pgPool) +{ +#ifdef RIPPLED_REPORTING + auto range = PgQuery(pgPool)("SELECT complete_ledgers()"); + if (range) + return range.c_str(); +#endif + return "error"; +} + +std::chrono::seconds +getValidatedLedgerAge(std::shared_ptr const& pgPool, beast::Journal j) +{ + using namespace std::chrono_literals; +#ifdef RIPPLED_REPORTING + auto age = PgQuery(pgPool)("SELECT age()"); + if (!age || age.isNull()) + JLOG(j.debug()) << "No ledgers in database"; + else + return std::chrono::seconds{age.asInt()}; +#endif + return weeks{2}; +} + +/** + * @brief loadLedgerInfos Load the ledger info for the specified + * ledger/s from the database + * @param pgPool Link to postgres database + * @param whichLedger Specifies the ledger to load via ledger sequence, + * ledger hash, a range of ledgers, or std::monostate + * (which loads the most recent) + * @param app Application + * @return Vector of LedgerInfos + */ +static std::vector +loadLedgerInfos( + std::shared_ptr const& pgPool, + std::variant< + std::monostate, + uint256, + uint32_t, + std::pair> const& whichLedger, + Application& app) +{ + std::vector infos; +#ifdef RIPPLED_REPORTING + auto log = 
app.journal("Ledger"); + assert(app.config().reporting()); + std::stringstream sql; + sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, " + "total_coins, closing_time, prev_closing_time, close_time_res, " + "close_flags, ledger_seq FROM ledgers "; + + uint32_t expNumResults = 1; + + if (auto ledgerSeq = std::get_if(&whichLedger)) + { + sql << "WHERE ledger_seq = " + std::to_string(*ledgerSeq); + } + else if (auto ledgerHash = std::get_if(&whichLedger)) + { + sql << ("WHERE ledger_hash = \'\\x" + strHex(*ledgerHash) + "\'"); + } + else if ( + auto minAndMax = + std::get_if>(&whichLedger)) + { + expNumResults = minAndMax->second - minAndMax->first; + + sql + << ("WHERE ledger_seq >= " + std::to_string(minAndMax->first) + + " AND ledger_seq <= " + std::to_string(minAndMax->second)); + } + else + { + sql << ("ORDER BY ledger_seq desc LIMIT 1"); + } + sql << ";"; + + JLOG(log.trace()) << __func__ << " : sql = " << sql.str(); + + auto res = PgQuery(pgPool)(sql.str().data()); + if (!res) + { + JLOG(log.error()) << __func__ << " : Postgres response is null - sql = " + << sql.str(); + assert(false); + return {}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(log.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - sql = " << sql.str(); + assert(false); + return {}; + } + + JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); + + if (res.isNull() || res.ntuples() == 0) + { + JLOG(log.debug()) << __func__ + << " : Ledger not found. sql = " << sql.str(); + return {}; + } + else if (res.ntuples() > 0) + { + if (res.nfields() != 10) + { + JLOG(log.error()) << __func__ + << " : Wrong number of fields in Postgres " + "response. Expected 10, but got " + << res.nfields() << " . 
sql = " << sql.str(); + assert(false); + return {}; + } + } + + for (size_t i = 0; i < res.ntuples(); ++i) + { + char const* hash = res.c_str(i, 0); + char const* prevHash = res.c_str(i, 1); + char const* accountHash = res.c_str(i, 2); + char const* txHash = res.c_str(i, 3); + std::int64_t totalCoins = res.asBigInt(i, 4); + std::int64_t closeTime = res.asBigInt(i, 5); + std::int64_t parentCloseTime = res.asBigInt(i, 6); + std::int64_t closeTimeRes = res.asBigInt(i, 7); + std::int64_t closeFlags = res.asBigInt(i, 8); + std::int64_t ledgerSeq = res.asBigInt(i, 9); + + JLOG(log.trace()) << __func__ << " - Postgres response = " << hash + << " , " << prevHash << " , " << accountHash << " , " + << txHash << " , " << totalCoins << ", " << closeTime + << ", " << parentCloseTime << ", " << closeTimeRes + << ", " << closeFlags << ", " << ledgerSeq + << " - sql = " << sql.str(); + JLOG(log.debug()) << __func__ + << " - Successfully fetched ledger with sequence = " + << ledgerSeq << " from Postgres"; + + using time_point = NetClock::time_point; + using duration = NetClock::duration; + + LedgerInfo info; + if (!info.parentHash.parseHex(prevHash + 2)) + assert(false); + if (!info.txHash.parseHex(txHash + 2)) + assert(false); + if (!info.accountHash.parseHex(accountHash + 2)) + assert(false); + info.drops = totalCoins; + info.closeTime = time_point{duration{closeTime}}; + info.parentCloseTime = time_point{duration{parentCloseTime}}; + info.closeFlags = closeFlags; + info.closeTimeResolution = duration{closeTimeRes}; + info.seq = ledgerSeq; + if (!info.hash.parseHex(hash + 2)) + assert(false); + info.validated = true; + infos.push_back(info); + } + +#endif + return infos; +} + +/** + * @brief loadLedgerHelper Load a ledger info from Postgres + * @param pgPool Link to postgres database + * @param whichLedger Specifies sequence or hash of ledger. 
Passing + * std::monostate loads the most recent ledger + * @param app The Application + * @return Ledger info + */ +static std::optional +loadLedgerHelper( + std::shared_ptr const& pgPool, + std::variant const& whichLedger, + Application& app) +{ + std::vector infos; + std::visit( + [&infos, &app, &pgPool](auto&& arg) { + infos = loadLedgerInfos(pgPool, arg, app); + }, + whichLedger); + assert(infos.size() <= 1); + if (!infos.size()) + return {}; + return infos[0]; +} + +std::optional +getNewestLedgerInfo(std::shared_ptr const& pgPool, Application& app) +{ + return loadLedgerHelper(pgPool, {}, app); +} + +std::optional +getLedgerInfoByIndex( + std::shared_ptr const& pgPool, + std::uint32_t ledgerIndex, + Application& app) +{ + return loadLedgerHelper(pgPool, uint32_t{ledgerIndex}, app); +} + +std::optional +getLedgerInfoByHash( + std::shared_ptr const& pgPool, + uint256 const& ledgerHash, + Application& app) +{ + return loadLedgerHelper(pgPool, uint256{ledgerHash}, app); +} + +uint256 +getHashByIndex( + std::shared_ptr const& pgPool, + std::uint32_t ledgerIndex, + Application& app) +{ + auto infos = loadLedgerInfos(pgPool, ledgerIndex, app); + assert(infos.size() <= 1); + if (infos.size()) + return infos[0].hash; + return {}; +} + +bool +getHashesByIndex( + std::shared_ptr const& pgPool, + std::uint32_t ledgerIndex, + uint256& ledgerHash, + uint256& parentHash, + Application& app) +{ + auto infos = loadLedgerInfos(pgPool, ledgerIndex, app); + assert(infos.size() <= 1); + if (infos.size()) + { + ledgerHash = infos[0].hash; + parentHash = infos[0].parentHash; + return true; + } + return false; +} + +std::map +getHashesByIndex( + std::shared_ptr const& pgPool, + std::uint32_t minSeq, + std::uint32_t maxSeq, + Application& app) +{ + std::map ret; + auto infos = loadLedgerInfos(pgPool, std::make_pair(minSeq, maxSeq), app); + for (auto& info : infos) + { + ret[info.seq] = {info.hash, info.parentHash}; + } + return ret; +} + +std::vector +getTxHashes( + std::shared_ptr const& pgPool, + LedgerIndex seq, + Application& app) +{ + std::vector nodestoreHashes; + +#ifdef RIPPLED_REPORTING + auto log = app.journal("Ledger"); + + std::string query = + "SELECT nodestore_hash" + " FROM transactions " + " WHERE ledger_seq = " + + std::to_string(seq); + auto res = PgQuery(pgPool)(query.c_str()); + + if (!res) + { + JLOG(log.error()) << __func__ + << " : Postgres response is null - query = " << query; + assert(false); + return {}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(log.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - query = " << query; + assert(false); + return {}; + } + + JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); + + if (res.isNull() || res.ntuples() == 0) + { + JLOG(log.debug()) << __func__ + << " : Ledger not found. query = " << query; + return {}; + } + else if (res.ntuples() > 0) + { + if (res.nfields() != 1) + { + JLOG(log.error()) << __func__ + << " : Wrong number of fields in Postgres " + "response. Expected 1, but got " + << res.nfields() << " . 
query = " << query; + assert(false); + return {}; + } + } + + JLOG(log.trace()) << __func__ << " : result = " << res.c_str() + << " : query = " << query; + for (size_t i = 0; i < res.ntuples(); ++i) + { + char const* nodestoreHash = res.c_str(i, 0); + uint256 hash; + if (!hash.parseHex(nodestoreHash + 2)) + assert(false); + + nodestoreHashes.push_back(hash); + } +#endif + + return nodestoreHashes; +} + +#ifdef RIPPLED_REPORTING +enum class DataFormat { binary, expanded }; +static std::variant +flatFetchTransactions( + Application& app, + std::vector& nodestoreHashes, + std::vector& ledgerSequences, + DataFormat format) +{ + std::variant ret; + if (format == DataFormat::binary) + ret = TxnsDataBinary(); + else + ret = TxnsData(); + + std::vector< + std::pair, std::shared_ptr>> + txns = flatFetchTransactions(app, nodestoreHashes); + for (size_t i = 0; i < txns.size(); ++i) + { + auto& [txn, meta] = txns[i]; + if (format == DataFormat::binary) + { + auto& transactions = std::get(ret); + Serializer txnSer = txn->getSerializer(); + Serializer metaSer = meta->getSerializer(); + // SerialIter it(item->slice()); + Blob txnBlob = txnSer.getData(); + Blob metaBlob = metaSer.getData(); + transactions.push_back( + std::make_tuple(txnBlob, metaBlob, ledgerSequences[i])); + } + else + { + auto& transactions = std::get(ret); + std::string reason; + auto txnRet = std::make_shared(txn, reason, app); + txnRet->setLedger(ledgerSequences[i]); + txnRet->setStatus(COMMITTED); + auto txMeta = std::make_shared( + txnRet->getID(), ledgerSequences[i], *meta); + transactions.push_back(std::make_pair(txnRet, txMeta)); + } + } + return ret; +} + +static std::pair +processAccountTxStoredProcedureResult( + AccountTxArgs const& args, + Json::Value& result, + Application& app, + beast::Journal j) +{ + AccountTxResult ret; + ret.limit = args.limit; + + try + { + if (result.isMember("transactions")) + { + std::vector nodestoreHashes; + std::vector ledgerSequences; + for (auto& t : result["transactions"]) + { + if (t.isMember("ledger_seq") && t.isMember("nodestore_hash")) + { + uint32_t ledgerSequence = t["ledger_seq"].asUInt(); + std::string nodestoreHashHex = + t["nodestore_hash"].asString(); + nodestoreHashHex.erase(0, 2); + uint256 nodestoreHash; + if (!nodestoreHash.parseHex(nodestoreHashHex)) + assert(false); + + if (nodestoreHash.isNonZero()) + { + ledgerSequences.push_back(ledgerSequence); + nodestoreHashes.push_back(nodestoreHash); + } + else + { + assert(false); + return {ret, {rpcINTERNAL, "nodestoreHash is zero"}}; + } + } + else + { + assert(false); + return {ret, {rpcINTERNAL, "missing postgres fields"}}; + } + } + + assert(nodestoreHashes.size() == ledgerSequences.size()); + ret.transactions = flatFetchTransactions( + app, + nodestoreHashes, + ledgerSequences, + args.binary ? 
DataFormat::binary : DataFormat::expanded); + + JLOG(j.trace()) << __func__ << " : processed db results"; + + if (result.isMember("marker")) + { + auto& marker = result["marker"]; + assert(marker.isMember("ledger")); + assert(marker.isMember("seq")); + ret.marker = { + marker["ledger"].asUInt(), marker["seq"].asUInt()}; + } + assert(result.isMember("ledger_index_min")); + assert(result.isMember("ledger_index_max")); + ret.ledgerRange = { + result["ledger_index_min"].asUInt(), + result["ledger_index_max"].asUInt()}; + return {ret, rpcSUCCESS}; + } + else if (result.isMember("error")) + { + JLOG(j.debug()) + << __func__ << " : error = " << result["error"].asString(); + return { + ret, + RPC::Status{rpcINVALID_PARAMS, result["error"].asString()}}; + } + else + { + return {ret, {rpcINTERNAL, "unexpected Postgres response"}}; + } + } + catch (std::exception& e) + { + JLOG(j.debug()) << __func__ << " : " + << "Caught exception : " << e.what(); + return {ret, {rpcINTERNAL, e.what()}}; + } +} +#endif + +std::pair +getAccountTx( + std::shared_ptr const& pgPool, + AccountTxArgs const& args, + Application& app, + beast::Journal j) +{ +#ifdef RIPPLED_REPORTING + pg_params dbParams; + + char const*& command = dbParams.first; + std::vector>& values = dbParams.second; + command = + "SELECT account_tx($1::bytea, $2::bool, " + "$3::bigint, $4::bigint, $5::bigint, $6::bytea, " + "$7::bigint, $8::bool, $9::bigint, $10::bigint)"; + values.resize(10); + values[0] = "\\x" + strHex(args.account); + values[1] = args.forward ? "true" : "false"; + + static std::uint32_t const page_length(200); + if (args.limit == 0 || args.limit > page_length) + values[2] = std::to_string(page_length); + else + values[2] = std::to_string(args.limit); + + if (args.ledger) + { + if (auto range = std::get_if(&args.ledger.value())) + { + values[3] = std::to_string(range->min); + values[4] = std::to_string(range->max); + } + else if (auto hash = std::get_if(&args.ledger.value())) + { + values[5] = ("\\x" + strHex(*hash)); + } + else if ( + auto sequence = std::get_if(&args.ledger.value())) + { + values[6] = std::to_string(*sequence); + } + else if (std::get_if(&args.ledger.value())) + { + // current, closed and validated are all treated as validated + values[7] = "true"; + } + else + { + JLOG(j.error()) << "doAccountTxStoredProcedure - " + << "Error parsing ledger args"; + return {}; + } + } + + if (args.marker) + { + values[8] = std::to_string(args.marker->ledgerSeq); + values[9] = std::to_string(args.marker->txnSeq); + } + for (size_t i = 0; i < values.size(); ++i) + { + JLOG(j.trace()) << "value " << std::to_string(i) << " = " + << (values[i] ? 
values[i].value() : "null"); + } + + auto res = PgQuery(pgPool)(dbParams); + if (!res) + { + JLOG(j.error()) << __func__ + << " : Postgres response is null - account = " + << strHex(args.account); + assert(false); + return {{}, {rpcINTERNAL, "Postgres error"}}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(j.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - account = " << strHex(args.account); + assert(false); + return {{}, {rpcINTERNAL, "Postgres error"}}; + } + + JLOG(j.trace()) << __func__ << " Postgres result msg : " << res.msg(); + if (res.isNull() || res.ntuples() == 0) + { + JLOG(j.debug()) << __func__ + << " : No data returned from Postgres : account = " + << strHex(args.account); + + assert(false); + return {{}, {rpcINTERNAL, "Postgres error"}}; + } + + char const* resultStr = res.c_str(); + JLOG(j.trace()) << __func__ << " : " + << "postgres result = " << resultStr + << " : account = " << strHex(args.account); + + Json::Value v; + Json::Reader reader; + bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); + if (success) + { + return processAccountTxStoredProcedureResult(args, v, app, j); + } +#endif + // This shouldn't happen. Postgres should return a parseable error + assert(false); + return {{}, {rpcINTERNAL, "Failed to deserialize Postgres result"}}; +} + +Transaction::Locator +locateTransaction( + std::shared_ptr const& pgPool, + uint256 const& id, + Application& app) +{ +#ifdef RIPPLED_REPORTING + auto baseCmd = boost::format(R"(SELECT tx('%s');)"); + + std::string txHash = "\\x" + strHex(id); + std::string sql = boost::str(baseCmd % txHash); + + auto res = PgQuery(pgPool)(sql.data()); + + if (!res) + { + JLOG(app.journal("Transaction").error()) + << __func__ + << " : Postgres response is null - tx ID = " << strHex(id); + assert(false); + return {}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(app.journal("Transaction").error()) + << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - tx ID = " << strHex(id); + assert(false); + return {}; + } + + JLOG(app.journal("Transaction").trace()) + << __func__ << " Postgres result msg : " << res.msg(); + if (res.isNull() || res.ntuples() == 0) + { + JLOG(app.journal("Transaction").debug()) + << __func__ + << " : No data returned from Postgres : tx ID = " << strHex(id); + // This shouldn't happen + assert(false); + return {}; + } + + char const* resultStr = res.c_str(); + JLOG(app.journal("Transaction").debug()) + << "postgres result = " << resultStr; + + Json::Value v; + Json::Reader reader; + bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); + if (success) + { + if (v.isMember("nodestore_hash") && v.isMember("ledger_seq")) + { + uint256 nodestoreHash; + if (!nodestoreHash.parseHex( + v["nodestore_hash"].asString().substr(2))) + assert(false); + uint32_t ledgerSeq = v["ledger_seq"].asUInt(); + if (nodestoreHash.isNonZero()) + return {std::make_pair(nodestoreHash, ledgerSeq)}; + } + if (v.isMember("min_seq") && v.isMember("max_seq")) + { + return {ClosedInterval( + v["min_seq"].asUInt(), v["max_seq"].asUInt())}; + } + } +#endif + // Shouldn' happen. 
Postgres should return the ledger range searched if + // the transaction was not found + assert(false); + Throw( + "Transaction::Locate - Invalid Postgres response"); + return {}; +} + +#ifdef RIPPLED_REPORTING +static bool +writeToLedgersDB(LedgerInfo const& info, PgQuery& pgQuery, beast::Journal& j) +{ + JLOG(j.debug()) << __func__; + auto cmd = boost::format( + R"(INSERT INTO ledgers + VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))"); + + auto ledgerInsert = boost::str( + cmd % info.seq % strHex(info.hash) % strHex(info.parentHash) % + info.drops.drops() % info.closeTime.time_since_epoch().count() % + info.parentCloseTime.time_since_epoch().count() % + info.closeTimeResolution.count() % info.closeFlags % + strHex(info.accountHash) % strHex(info.txHash)); + JLOG(j.trace()) << __func__ << " : " + << " : " + << "query string = " << ledgerInsert; + + auto res = pgQuery(ledgerInsert.data()); + + return res; +} +#endif + +bool +writeLedgerAndTransactions( + std::shared_ptr const& pgPool, + LedgerInfo const& info, + std::vector const& accountTxData, + beast::Journal& j) +{ +#ifdef RIPPLED_REPORTING + JLOG(j.debug()) << __func__ << " : " + << "Beginning write to Postgres"; + + try + { + // Create a PgQuery object to run multiple commands over the same + // connection in a single transaction block. + PgQuery pg(pgPool); + auto res = pg("BEGIN"); + if (!res || res.status() != PGRES_COMMAND_OK) + { + std::stringstream msg; + msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); + Throw(msg.str()); + } + + // Writing to the ledgers db fails if the ledger already exists in the + // db. In this situation, the ETL process has detected there is another + // writer, and falls back to only publishing + if (!writeToLedgersDB(info, pg, j)) + { + JLOG(j.warn()) << __func__ << " : " + << "Failed to write to ledgers database."; + return false; + } + + std::stringstream transactionsCopyBuffer; + std::stringstream accountTransactionsCopyBuffer; + for (auto const& data : accountTxData) + { + std::string txHash = strHex(data.txHash); + std::string nodestoreHash = strHex(data.nodestoreHash); + auto idx = data.transactionIndex; + auto ledgerSeq = data.ledgerSequence; + + transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t' + << std::to_string(idx) << '\t' << "\\\\x" + << txHash << '\t' << "\\\\x" << nodestoreHash + << '\n'; + + for (auto const& a : data.accounts) + { + std::string acct = strHex(a); + accountTransactionsCopyBuffer + << "\\\\x" << acct << '\t' << std::to_string(ledgerSeq) + << '\t' << std::to_string(idx) << '\n'; + } + } + + pg.bulkInsert("transactions", transactionsCopyBuffer.str()); + pg.bulkInsert( + "account_transactions", accountTransactionsCopyBuffer.str()); + + res = pg("COMMIT"); + if (!res || res.status() != PGRES_COMMAND_OK) + { + std::stringstream msg; + msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); + assert(false); + Throw(msg.str()); + } + + JLOG(j.info()) << __func__ << " : " + << "Successfully wrote to Postgres"; + return true; + } + catch (std::exception& e) + { + JLOG(j.error()) << __func__ << "Caught exception writing to Postgres : " + << e.what(); + assert(false); + return false; + } +#else + return false; +#endif +} + +std::vector> +getTxHistory( + std::shared_ptr const& pgPool, + LedgerIndex startIndex, + Application& app, + beast::Journal j) +{ + std::vector> ret; + +#ifdef RIPPLED_REPORTING + if (!app.config().reporting()) + { + assert(false); + Throw( + "called getTxHistory but not in reporting mode"); + } + + std::string sql = 
boost::str( + boost::format("SELECT nodestore_hash, ledger_seq " + " FROM transactions" + " ORDER BY ledger_seq DESC LIMIT 20 " + "OFFSET %u;") % + startIndex); + + auto res = PgQuery(pgPool)(sql.data()); + + if (!res) + { + JLOG(j.error()) << __func__ + << " : Postgres response is null - sql = " << sql; + assert(false); + return {}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(j.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - sql = " << sql; + assert(false); + return {}; + } + + JLOG(j.trace()) << __func__ << " Postgres result msg : " << res.msg(); + + if (res.isNull() || res.ntuples() == 0) + { + JLOG(j.debug()) << __func__ << " : Empty postgres response"; + assert(false); + return {}; + } + else if (res.ntuples() > 0) + { + if (res.nfields() != 2) + { + JLOG(j.error()) << __func__ + << " : Wrong number of fields in Postgres " + "response. Expected 1, but got " + << res.nfields() << " . sql = " << sql; + assert(false); + return {}; + } + } + + JLOG(j.trace()) << __func__ << " : Postgres result = " << res.c_str(); + + std::vector nodestoreHashes; + std::vector ledgerSequences; + for (size_t i = 0; i < res.ntuples(); ++i) + { + uint256 hash; + if (!hash.parseHex(res.c_str(i, 0) + 2)) + assert(false); + nodestoreHashes.push_back(hash); + ledgerSequences.push_back(res.asBigInt(i, 1)); + } + + auto txns = flatFetchTransactions(app, nodestoreHashes); + for (size_t i = 0; i < txns.size(); ++i) + { + auto const& [sttx, meta] = txns[i]; + assert(sttx); + + std::string reason; + auto txn = std::make_shared(sttx, reason, app); + txn->setLedger(ledgerSequences[i]); + txn->setStatus(COMMITTED); + ret.push_back(txn); + } + +#endif + return ret; +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp new file mode 100644 index 0000000000..70a65ce122 --- /dev/null +++ b/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp @@ -0,0 +1,370 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +DatabasePair +makeShardCompleteLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup) +{ + auto tx{std::make_unique( + setup, TxDBName, FinalShardDBPragma, TxDBInit)}; + tx->getSession() << boost::str( + boost::format("PRAGMA cache_size=-%d;") % + kilobytes(config.getValueFor(SizedItem::txnDBCache, std::nullopt))); + + auto lgr{std::make_unique( + setup, LgrDBName, FinalShardDBPragma, LgrDBInit)}; + lgr->getSession() << boost::str( + boost::format("PRAGMA cache_size=-%d;") % + kilobytes(config.getValueFor(SizedItem::lgrDBCache, std::nullopt))); + + return {std::move(lgr), std::move(tx)}; +} + +DatabasePair +makeShardIncompleteLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup) +{ + // transaction database + auto tx{std::make_unique( + setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup)}; + tx->getSession() << boost::str( + boost::format("PRAGMA cache_size=-%d;") % + kilobytes(config.getValueFor(SizedItem::txnDBCache))); + + // ledger database + auto lgr{std::make_unique( + setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup)}; + lgr->getSession() << boost::str( + boost::format("PRAGMA cache_size=-%d;") % + kilobytes(config.getValueFor(SizedItem::lgrDBCache))); + + return {std::move(lgr), std::move(tx)}; +} + +bool +updateLedgerDBs( + soci::session& txsession, + soci::session& lgrsession, + std::shared_ptr const& ledger, + std::uint32_t index, + std::atomic& stop, + beast::Journal j) +{ + auto const ledgerSeq{ledger->info().seq}; + + // Update the transactions database + { + auto& session{txsession}; + soci::transaction tr(session); + + session << "DELETE FROM Transactions " + "WHERE LedgerSeq = :seq;", + soci::use(ledgerSeq); + session << "DELETE FROM AccountTransactions " + "WHERE LedgerSeq = :seq;", + soci::use(ledgerSeq); + + if (ledger->info().txHash.isNonZero()) + { + auto const sSeq{std::to_string(ledgerSeq)}; + if (!ledger->txMap().isValid()) + { + JLOG(j.error()) + << "shard " << index << " has an invalid transaction map" + << " on sequence " << sSeq; + return false; + } + + for (auto const& item : ledger->txs) + { + if (stop) + return false; + + auto const txID{item.first->getTransactionID()}; + auto const sTxID{to_string(txID)}; + auto const txMeta{std::make_shared( + txID, ledger->seq(), *item.second)}; + + session << "DELETE FROM AccountTransactions " + "WHERE TransID = :txID;", + soci::use(sTxID); + + auto const& accounts = txMeta->getAffectedAccounts(j); + if (!accounts.empty()) + { + auto const sTxnSeq{std::to_string(txMeta->getIndex())}; + auto const s{boost::str( + boost::format("('%s','%s',%s,%s)") % sTxID % "%s" % + sSeq % sTxnSeq)}; + std::string sql; + sql.reserve((accounts.size() + 1) * 128); + sql = + "INSERT INTO AccountTransactions " + "(TransID, Account, LedgerSeq, TxnSeq) VALUES "; + sql += boost::algorithm::join( + accounts | + boost::adaptors::transformed( + [&](AccountID const& accountID) { + return boost::str( + boost::format(s) % + ripple::toBase58(accountID)); + }), + ","); + sql += ';'; + session << sql; + + JLOG(j.trace()) + << "shard " << index << " account transaction: " << sql; + } + else + { + JLOG(j.warn()) + << "shard " << index << " transaction in ledger " + << sSeq << " affects no accounts"; + } + + Serializer s; + 
item.second->add(s); + session + << (STTx::getMetaSQLInsertReplaceHeader() + + item.first->getMetaSQL( + ledgerSeq, sqlBlobLiteral(s.modData())) + + ';'); + } + } + + tr.commit(); + } + + auto const sHash{to_string(ledger->info().hash)}; + + // Update the ledger database + { + auto& session{lgrsession}; + soci::transaction tr(session); + + auto const sParentHash{to_string(ledger->info().parentHash)}; + auto const sDrops{to_string(ledger->info().drops)}; + auto const sAccountHash{to_string(ledger->info().accountHash)}; + auto const sTxHash{to_string(ledger->info().txHash)}; + + session << "DELETE FROM Ledgers " + "WHERE LedgerSeq = :seq;", + soci::use(ledgerSeq); + session << "INSERT OR REPLACE INTO Ledgers (" + "LedgerHash, LedgerSeq, PrevHash, TotalCoins, ClosingTime," + "PrevClosingTime, CloseTimeRes, CloseFlags, AccountSetHash," + "TransSetHash)" + "VALUES (" + ":ledgerHash, :ledgerSeq, :prevHash, :totalCoins," + ":closingTime, :prevClosingTime, :closeTimeRes," + ":closeFlags, :accountSetHash, :transSetHash);", + soci::use(sHash), soci::use(ledgerSeq), soci::use(sParentHash), + soci::use(sDrops), + soci::use(ledger->info().closeTime.time_since_epoch().count()), + soci::use( + ledger->info().parentCloseTime.time_since_epoch().count()), + soci::use(ledger->info().closeTimeResolution.count()), + soci::use(ledger->info().closeFlags), soci::use(sAccountHash), + soci::use(sTxHash); + + tr.commit(); + } + + return true; +} + +/* Shard acquire db */ + +std::unique_ptr +makeAcquireDB( + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup) +{ + return std::make_unique( + setup, + AcquireShardDBName, + AcquireShardDBPragma, + AcquireShardDBInit, + checkpointerSetup); +} + +void +insertAcquireDBIndex(soci::session& session, std::uint32_t index) +{ + session << "INSERT INTO Shard (ShardIndex) " + "VALUES (:shardIndex);", + soci::use(index); +} + +std::pair> +selectAcquireDBLedgerSeqs(soci::session& session, std::uint32_t index) +{ + // resIndex and must be boost::optional (not std) because that's + // what SOCI expects in its interface. + boost::optional resIndex; + soci::blob sociBlob(session); + soci::indicator blobPresent; + + session << "SELECT ShardIndex, StoredLedgerSeqs " + "FROM Shard " + "WHERE ShardIndex = :index;", + soci::into(resIndex), soci::into(sociBlob, blobPresent), + soci::use(index); + + if (!resIndex || index != resIndex) + return {false, {}}; + + if (blobPresent != soci::i_ok) + return {true, {}}; + + std::string s; + convert(sociBlob, s); + + return {true, s}; +} + +std::pair +selectAcquireDBLedgerSeqsHash(soci::session& session, std::uint32_t index) +{ + // resIndex and sHash0 must be boost::optional (not std) because that's + // what SOCI expects in its interface. + boost::optional resIndex; + boost::optional sHash0; + soci::blob sociBlob(session); + soci::indicator blobPresent; + + session << "SELECT ShardIndex, LastLedgerHash, StoredLedgerSeqs " + "FROM Shard " + "WHERE ShardIndex = :index;", + soci::into(resIndex), soci::into(sHash0), + soci::into(sociBlob, blobPresent), soci::use(index); + + std::optional sHash = + (sHash0 ? 
*sHash0 : std::optional()); + + if (!resIndex || index != resIndex) + return {false, {{}, {}}}; + + if (blobPresent != soci::i_ok) + return {true, {{}, sHash}}; + + std::string s; + convert(sociBlob, s); + + return {true, {s, sHash}}; +} + +void +updateAcquireDB( + soci::session& session, + std::shared_ptr const& ledger, + std::uint32_t index, + std::uint32_t lastSeq, + std::optional const& seqs) +{ + soci::blob sociBlob(session); + auto const sHash{to_string(ledger->info().hash)}; + + if (seqs) + convert(*seqs, sociBlob); + + if (ledger->info().seq == lastSeq) + { + // Store shard's last ledger hash + session << "UPDATE Shard " + "SET LastLedgerHash = :lastLedgerHash," + "StoredLedgerSeqs = :storedLedgerSeqs " + "WHERE ShardIndex = :shardIndex;", + soci::use(sHash), soci::use(sociBlob), soci::use(index); + } + else + { + session << "UPDATE Shard " + "SET StoredLedgerSeqs = :storedLedgerSeqs " + "WHERE ShardIndex = :shardIndex;", + soci::use(sociBlob), soci::use(index); + } +} + +/* Archive DB */ + +std::unique_ptr +makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName) +{ + return std::make_unique( + dir, dbName, DownloaderDBPragma, ShardArchiveHandlerDBInit); +} + +void +readArchiveDB( + DatabaseCon& db, + std::function const& func) +{ + soci::rowset rs = + (db.getSession().prepare << "SELECT * FROM State;"); + + for (auto it = rs.begin(); it != rs.end(); ++it) + { + func(it->get(1), it->get(0)); + } +} + +void +insertArchiveDB( + DatabaseCon& db, + std::uint32_t shardIndex, + std::string const& url) +{ + db.getSession() << "INSERT INTO State VALUES (:index, :url);", + soci::use(shardIndex), soci::use(url); +} + +void +deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex) +{ + db.getSession() << "DELETE FROM State WHERE ShardIndex = :index;", + soci::use(shardIndex); +} + +void +dropArchiveDB(DatabaseCon& db) +{ + db.getSession() << "DROP TABLE State;"; +} + +} // namespace ripple diff --git a/src/ripple/app/reporting/DBHelpers.cpp b/src/ripple/app/reporting/DBHelpers.cpp deleted file mode 100644 index 4a1793aa7d..0000000000 --- a/src/ripple/app/reporting/DBHelpers.cpp +++ /dev/null @@ -1,132 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifdef RIPPLED_REPORTING -#include -#include - -namespace ripple { - -static bool -writeToLedgersDB(LedgerInfo const& info, PgQuery& pgQuery, beast::Journal& j) -{ - JLOG(j.debug()) << __func__; - auto cmd = boost::format( - R"(INSERT INTO ledgers - VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))"); - - auto ledgerInsert = boost::str( - cmd % info.seq % strHex(info.hash) % strHex(info.parentHash) % - info.drops.drops() % info.closeTime.time_since_epoch().count() % - info.parentCloseTime.time_since_epoch().count() % - info.closeTimeResolution.count() % info.closeFlags % - strHex(info.accountHash) % strHex(info.txHash)); - JLOG(j.trace()) << __func__ << " : " - << " : " - << "query string = " << ledgerInsert; - - auto res = pgQuery(ledgerInsert.data()); - - return res; -} - -bool -writeToPostgres( - LedgerInfo const& info, - std::vector const& accountTxData, - std::shared_ptr const& pgPool, - beast::Journal& j) -{ - JLOG(j.debug()) << __func__ << " : " - << "Beginning write to Postgres"; - - try - { - // Create a PgQuery object to run multiple commands over the same - // connection in a single transaction block. - PgQuery pg(pgPool); - auto res = pg("BEGIN"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - Throw(msg.str()); - } - - // Writing to the ledgers db fails if the ledger already exists in the - // db. In this situation, the ETL process has detected there is another - // writer, and falls back to only publishing - if (!writeToLedgersDB(info, pg, j)) - { - JLOG(j.warn()) << __func__ << " : " - << "Failed to write to ledgers database."; - return false; - } - - std::stringstream transactionsCopyBuffer; - std::stringstream accountTransactionsCopyBuffer; - for (auto const& data : accountTxData) - { - std::string txHash = strHex(data.txHash); - std::string nodestoreHash = strHex(data.nodestoreHash); - auto idx = data.transactionIndex; - auto ledgerSeq = data.ledgerSequence; - - transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t' - << std::to_string(idx) << '\t' << "\\\\x" - << txHash << '\t' << "\\\\x" << nodestoreHash - << '\n'; - - for (auto const& a : data.accounts) - { - std::string acct = strHex(a); - accountTransactionsCopyBuffer - << "\\\\x" << acct << '\t' << std::to_string(ledgerSeq) - << '\t' << std::to_string(idx) << '\n'; - } - } - - pg.bulkInsert("transactions", transactionsCopyBuffer.str()); - pg.bulkInsert( - "account_transactions", accountTransactionsCopyBuffer.str()); - - res = pg("COMMIT"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - assert(false); - Throw(msg.str()); - } - - JLOG(j.info()) << __func__ << " : " - << "Successfully wrote to Postgres"; - return true; - } - catch (std::exception& e) - { - JLOG(j.error()) << __func__ << "Caught exception writing to Postgres : " - << e.what(); - assert(false); - return false; - } -} - -} // namespace ripple -#endif diff --git a/src/ripple/app/reporting/DBHelpers.h b/src/ripple/app/reporting/DBHelpers.h deleted file mode 100644 index b8d672af9f..0000000000 --- a/src/ripple/app/reporting/DBHelpers.h +++ /dev/null @@ -1,69 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_DBHELPERS_H_INCLUDED -#define RIPPLE_APP_REPORTING_DBHELPERS_H_INCLUDED - -#include -#include -#include -#include - -namespace ripple { - -/// Struct used to keep track of what to write to transactions and -/// account_transactions tables in Postgres -struct AccountTransactionsData -{ - boost::container::flat_set accounts; - uint32_t ledgerSequence; - uint32_t transactionIndex; - uint256 txHash; - uint256 nodestoreHash; - - AccountTransactionsData( - TxMeta& meta, - uint256&& nodestoreHash, - beast::Journal& j) - : accounts(meta.getAffectedAccounts(j)) - , ledgerSequence(meta.getLgrSeq()) - , transactionIndex(meta.getIndex()) - , txHash(meta.getTxID()) - , nodestoreHash(std::move(nodestoreHash)) - { - } -}; - -#ifdef RIPPLED_REPORTING -/// Write new ledger and transaction data to Postgres -/// @param info Ledger Info to write -/// @param accountTxData transaction data to write -/// @param pgPool pool of Postgres connections -/// @param j journal (for logging) -/// @return whether the write succeeded -bool -writeToPostgres( - LedgerInfo const& info, - std::vector const& accountTxData, - std::shared_ptr const& pgPool, - beast::Journal& j); - -#endif -} // namespace ripple -#endif diff --git a/src/ripple/app/reporting/ReportingETL.cpp b/src/ripple/app/reporting/ReportingETL.cpp index bf49592ffb..b03d78d041 100644 --- a/src/ripple/app/reporting/ReportingETL.cpp +++ b/src/ripple/app/reporting/ReportingETL.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include #include @@ -165,8 +165,9 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) if (app_.config().reporting()) { #ifdef RIPPLED_REPORTING - writeToPostgres( - ledger->info(), accountTxData, app_.getPgPool(), journal_); + dynamic_cast( + &app_.getRelationalDBInterface()) + ->writeLedgerAndTransactions(ledger->info(), accountTxData); #endif } } @@ -589,69 +590,69 @@ ReportingETL::runETLPipeline(uint32_t startSequence) loadQueue.push({}); }}; - std::thread loader{[this, - &lastPublishedSequence, - &loadQueue, - &writeConflict]() { - beast::setCurrentThreadName("rippled: ReportingETL load"); - size_t totalTransactions = 0; - double totalTime = 0; - while (!writeConflict) - { - std::optional, - std::vector>> - result{loadQueue.pop()}; - // if result is an empty optional, the transformer thread has - // stopped and the loader should stop as well - if (!result) - break; - if (isStopping()) - continue; + std::thread loader{ + [this, &lastPublishedSequence, &loadQueue, &writeConflict]() { + beast::setCurrentThreadName("rippled: ReportingETL load"); + size_t totalTransactions = 0; + double totalTime = 0; + while (!writeConflict) + { + 
std::optional, + std::vector>> + result{loadQueue.pop()}; + // if result is an empty optional, the transformer thread has + // stopped and the loader should stop as well + if (!result) + break; + if (isStopping()) + continue; - auto& ledger = result->first; - auto& accountTxData = result->second; + auto& ledger = result->first; + auto& accountTxData = result->second; - auto start = std::chrono::system_clock::now(); - // write to the key-value store - flushLedger(ledger); + auto start = std::chrono::system_clock::now(); + // write to the key-value store + flushLedger(ledger); - auto mid = std::chrono::system_clock::now(); + auto mid = std::chrono::system_clock::now(); // write to RDBMS // if there is a write conflict, some other process has already // written this ledger and has taken over as the ETL writer #ifdef RIPPLED_REPORTING - if (!writeToPostgres( - ledger->info(), accountTxData, app_.getPgPool(), journal_)) - writeConflict = true; + if (!dynamic_cast( + &app_.getRelationalDBInterface()) + ->writeLedgerAndTransactions( + ledger->info(), accountTxData)) + writeConflict = true; #endif + auto end = std::chrono::system_clock::now(); - auto end = std::chrono::system_clock::now(); + if (!writeConflict) + { + publishLedger(ledger); + lastPublishedSequence = ledger->info().seq; + } + // print some performance numbers + auto kvTime = ((mid - start).count()) / 1000000000.0; + auto relationalTime = ((end - mid).count()) / 1000000000.0; - if (!writeConflict) - { - publishLedger(ledger); - lastPublishedSequence = ledger->info().seq; + size_t numTxns = accountTxData.size(); + totalTime += kvTime; + totalTransactions += numTxns; + JLOG(journal_.info()) + << "Load phase of etl : " + << "Successfully published ledger! Ledger info: " + << detail::toString(ledger->info()) + << ". txn count = " << numTxns + << ". key-value write time = " << kvTime + << ". relational write time = " << relationalTime + << ". key-value tps = " << numTxns / kvTime + << ". relational tps = " << numTxns / relationalTime + << ". total key-value tps = " + << totalTransactions / totalTime; } - // print some performance numbers - auto kvTime = ((mid - start).count()) / 1000000000.0; - auto relationalTime = ((end - mid).count()) / 1000000000.0; - - size_t numTxns = accountTxData.size(); - totalTime += kvTime; - totalTransactions += numTxns; - JLOG(journal_.info()) - << "Load phase of etl : " - << "Successfully published ledger! Ledger info: " - << detail::toString(ledger->info()) - << ". txn count = " << numTxns - << ". key-value write time = " << kvTime - << ". relational write time = " << relationalTime - << ". key-value tps = " << numTxns / kvTime - << ". relational tps = " << numTxns / relationalTime - << ". 
total key-value tps = " << totalTransactions / totalTime; - } - }}; + }}; // wait for all of the threads to stop loader.join(); diff --git a/src/ripple/app/reporting/ReportingETL.h b/src/ripple/app/reporting/ReportingETL.h index 5ccd8d90d4..e55cbe29e4 100644 --- a/src/ripple/app/reporting/ReportingETL.h +++ b/src/ripple/app/reporting/ReportingETL.h @@ -21,6 +21,7 @@ #define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED #include +#include #include #include #include @@ -50,7 +51,7 @@ #include namespace ripple { -struct AccountTransactionsData; +using AccountTransactionsData = RelationalDBInterface::AccountTransactionsData; /** * This class is responsible for continuously extracting data from a diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index ba175b31d9..f5244738a0 100644 --- a/src/ripple/core/ConfigSections.h +++ b/src/ripple/core/ConfigSections.h @@ -76,6 +76,7 @@ struct ConfigSection #define SECTION_PEERS_IN_MAX "peers_in_max" #define SECTION_PEERS_OUT_MAX "peers_out_max" #define SECTION_REDUCE_RELAY "reduce_relay" +#define SECTION_RELATIONAL_DB "relational_db" #define SECTION_RELAY_PROPOSALS "relay_proposals" #define SECTION_RELAY_VALIDATIONS "relay_validations" #define SECTION_RPC_STARTUP "rpc_startup" diff --git a/src/ripple/core/SociDB.h b/src/ripple/core/SociDB.h index 0c66deed51..1a9ada1d6e 100644 --- a/src/ripple/core/SociDB.h +++ b/src/ripple/core/SociDB.h @@ -47,40 +47,20 @@ struct sqlite3; namespace ripple { -template -T -rangeCheckedCast(C c) -{ - if ((c > std::numeric_limits::max()) || - (!std::numeric_limits::is_signed && c < 0) || - (std::numeric_limits::is_signed && - std::numeric_limits::is_signed && - c < std::numeric_limits::lowest())) - { - JLOG(debugLog().error()) - << "rangeCheckedCast domain error:" - << " value = " << c << " min = " << std::numeric_limits::lowest() - << " max: " << std::numeric_limits::max(); - } - - return static_cast(c); -} - class BasicConfig; /** - SociConfig is used when a client wants to delay opening a soci::session after + DBConfig is used when a client wants to delay opening a soci::session after parsing the config parameters. If a client want to open a session immediately, use the free function "open" below. 
*/ -class SociConfig +class DBConfig { std::string connectionString_; - soci::backend_factory const& backendFactory_; - SociConfig(std::pair init); + DBConfig(std::string const& dbPath); public: - SociConfig(BasicConfig const& config, std::string const& dbName); + DBConfig(BasicConfig const& config, std::string const& dbName); std::string connectionString() const; void diff --git a/src/ripple/core/impl/SociDB.cpp b/src/ripple/core/impl/SociDB.cpp index 5c81d9f9e0..4128c81382 100644 --- a/src/ripple/core/impl/SociDB.cpp +++ b/src/ripple/core/impl/SociDB.cpp @@ -38,7 +38,7 @@ static auto checkpointPageCount = 1000; namespace detail { -std::pair +std::string getSociSqliteInit( std::string const& name, std::string const& dir, @@ -53,10 +53,10 @@ getSociSqliteInit( boost::filesystem::path file(dir); if (is_directory(file)) file /= name + ext; - return std::make_pair(file.string(), std::ref(soci::sqlite3)); + return file.string(); } -std::pair +std::string getSociInit(BasicConfig const& config, std::string const& dbName) { auto const& section = config.section("sqdb"); @@ -73,33 +73,31 @@ getSociInit(BasicConfig const& config, std::string const& dbName) } // namespace detail -SociConfig::SociConfig( - std::pair init) - : connectionString_(std::move(init.first)), backendFactory_(init.second) +DBConfig::DBConfig(std::string const& dbPath) : connectionString_(dbPath) { } -SociConfig::SociConfig(BasicConfig const& config, std::string const& dbName) - : SociConfig(detail::getSociInit(config, dbName)) +DBConfig::DBConfig(BasicConfig const& config, std::string const& dbName) + : DBConfig(detail::getSociInit(config, dbName)) { } std::string -SociConfig::connectionString() const +DBConfig::connectionString() const { return connectionString_; } void -SociConfig::open(soci::session& s) const +DBConfig::open(soci::session& s) const { - s.open(backendFactory_, connectionString()); + s.open(soci::sqlite3, connectionString()); } void open(soci::session& s, BasicConfig const& config, std::string const& dbName) { - SociConfig(config, dbName).open(s); + DBConfig(config, dbName).open(s); } void diff --git a/src/ripple/net/impl/DatabaseBody.ipp b/src/ripple/net/impl/DatabaseBody.ipp index fda3b6bdaf..e072d998e0 100644 --- a/src/ripple/net/impl/DatabaseBody.ipp +++ b/src/ripple/net/impl/DatabaseBody.ipp @@ -17,6 +17,8 @@ */ //============================================================================== +#include + namespace ripple { inline void @@ -47,47 +49,16 @@ DatabaseBody::value_type::open( boost::system::error_code& ec) { strand_.reset(new boost::asio::io_service::strand(io_service)); + path_ = path; auto setup = setup_DatabaseCon(config); setup.dataDir = path.parent_path(); setup.useGlobalPragma = false; - // Downloader ignores the "CommonPragma" - conn_ = std::make_unique( - setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); - - path_ = path; - - auto db = conn_->checkoutDb(); - - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional pathFromDb; - - *db << "SELECT Path FROM Download WHERE Part=0;", soci::into(pathFromDb); - - // Try to reuse preexisting - // database. - if (pathFromDb) - { - // Can't resuse - database was - // from a different file download. - if (pathFromDb != path.string()) - { - *db << "DROP TABLE Download;"; - } - - // Continuing a file download. - else - { - // SOCI requires boost::optional (not std::optional) parameter. 
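The renamed DBConfig keeps the deferred-open behaviour described in its comment: build it once from the parsed configuration, then open sessions later, or use the free open() for the immediate case. A minimal sketch, using the "peerfinder" database name that appears elsewhere in this patch; the function name is illustrative.

#include <ripple/core/SociDB.h>
#include <soci/soci.h>

// Sketch only: defer opening a soci::session until it is actually needed.
// "config" is assumed to be the parsed server configuration.
void
deferredOpenExample(ripple::BasicConfig const& config)
{
    ripple::DBConfig dbConfig(config, "peerfinder");

    // ... later, when a session is required ...
    soci::session session;
    dbConfig.open(session);

    // For the immediate case, the free function does the same in one step.
    soci::session session2;
    ripple::open(session2, config, "peerfinder");
}
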
- boost::optional size; - - *db << "SELECT SUM(LENGTH(Data)) FROM Download;", soci::into(size); - - if (size) - fileSize_ = size.get(); - } - } + auto [conn, size] = openDatabaseBodyDb(setup, path); + conn_ = std::move(conn); + if (size) + fileSize_ = *size; } // This is called from message::payload_size @@ -206,65 +177,13 @@ DatabaseBody::reader::do_put(std::string data) } auto path = body_.path_.string(); - std::uint64_t rowSize = 0; - soci::indicator rti; - std::uint64_t remainingInRow = 0; - - auto db = body_.conn_->checkoutDb(); - - auto be = dynamic_cast(db->get_backend()); - BOOST_ASSERT(be); - - // This limits how large we can make the blob - // in each row. Also subtract a pad value to - // account for the other values in the row. - auto const blobMaxSize = - sqlite_api::sqlite3_limit(be->conn_, SQLITE_LIMIT_LENGTH, -1) - - MAX_ROW_SIZE_PAD; - - auto rowInit = [&] { - *db << "INSERT INTO Download VALUES (:path, zeroblob(0), 0, :part)", - soci::use(path), soci::use(body_.part_); - - remainingInRow = blobMaxSize; - rowSize = 0; - }; - - *db << "SELECT Path,Size,Part FROM Download ORDER BY Part DESC " - "LIMIT 1", - soci::into(path), soci::into(rowSize), soci::into(body_.part_, rti); - - if (!db->got_data()) - rowInit(); - else - remainingInRow = blobMaxSize - rowSize; - - auto insert = [&db, &rowSize, &part = body_.part_, &fs = body_.fileSize_]( - auto const& data) { - std::uint64_t updatedSize = rowSize + data.size(); - - *db << "UPDATE Download SET Data = CAST(Data || :data AS blob), " - "Size = :size WHERE Part = :part;", - soci::use(data), soci::use(updatedSize), soci::use(part); - - fs += data.size(); - }; - - while (remainingInRow < data.size()) { - if (remainingInRow) - { - insert(data.substr(0, remainingInRow)); - data.erase(0, remainingInRow); - } - - ++body_.part_; - rowInit(); + auto db = body_.conn_->checkoutDb(); + body_.part_ = databaseBodyDoPut( + *db, data, path, body_.fileSize_, body_.part_, MAX_ROW_SIZE_PAD); } - insert(data); - bool const notify = [this] { std::lock_guard lock(body_.m_); return --body_.handlerCount_ == 0; @@ -290,17 +209,13 @@ DatabaseBody::reader::finish(boost::system::error_code& ec) } } - auto db = body_.conn_->checkoutDb(); - - soci::rowset rs = - (db->prepare << "SELECT Data FROM Download ORDER BY PART ASC;"); - std::ofstream fout; fout.open(body_.path_.string(), std::ios::binary | std::ios::out); - // iteration through the resultset: - for (auto it = rs.begin(); it != rs.end(); ++it) - fout.write(it->data(), it->size()); + { + auto db = body_.conn_->checkoutDb(); + databaseBodyFinish(*db, fout); + } // Flush any pending data that hasn't // been been written to the DB. diff --git a/src/ripple/nodestore/DatabaseShard.h b/src/ripple/nodestore/DatabaseShard.h index 826b09afdb..4c1efdfa5d 100644 --- a/src/ripple/nodestore/DatabaseShard.h +++ b/src/ripple/nodestore/DatabaseShard.h @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -136,6 +137,102 @@ public: virtual std::string getCompleteShards() = 0; + /** + * @brief callForLedgerSQL Checkouts ledger database for shard + * containing given ledger and calls given callback function passing + * shard index and session with the database to it. + * @param ledgerSeq Ledger sequence. + * @param callback Callback function to call. + * @return Value returned by callback function. 
+ */ + virtual bool + callForLedgerSQL( + LedgerIndex ledgerSeq, + std::function const& + callback) = 0; + + /** + * @brief callForTransactionSQL Checkouts transaction database for shard + * containing given ledger and calls given callback function passing + * shard index and session with the database to it. + * @param ledgerSeq Ledger sequence. + * @param callback Callback function to call. + * @return Value returned by callback function. + */ + virtual bool + callForTransactionSQL( + LedgerIndex ledgerSeq, + std::function const& + callback) = 0; + + /** + * @brief iterateLedgerSQLsForward Checkouts ledger databases for all + * shards in ascending order starting from given shard index until + * shard with the largest index visited or callback returned false. + * For each visited shard calls given callback function passing + * shard index and session with the database to it. + * @param minShardIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returns true, false otherwise. + */ + virtual bool + iterateLedgerSQLsForward( + std::optional minShardIndex, + std::function const& + callback) = 0; + + /** + * @brief iterateTransactionSQLsForward Checkouts transaction databases for + * all shards in ascending order starting from given shard index + * until shard with the largest index visited or callback returned + * false. For each visited shard calls given callback function + * passing shard index and session with the database to it. + * @param minShardIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returns true, false otherwise. + */ + virtual bool + iterateTransactionSQLsForward( + std::optional minShardIndex, + std::function const& + callback) = 0; + + /** + * @brief iterateLedgerSQLsBack Checkouts ledger databases for + * all shards in descending order starting from given shard index + * until shard with the smallest index visited or callback returned + * false. For each visited shard calls given callback function + * passing shard index and session with the database to it. + * @param maxShardIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returns true, false otherwise. + */ + virtual bool + iterateLedgerSQLsBack( + std::optional maxShardIndex, + std::function const& + callback) = 0; + + /** + * @brief iterateTransactionSQLsBack Checkouts transaction databases for + * all shards in descending order starting from given shard index + * until shard with the smallest index visited or callback returned + * false. For each visited shard calls given callback function + * passing shard index and session with the database to it. + * @param maxShardIndex Start shard index to visit or none if all shards + * should be visited. + * @param callback Callback function to call. + * @return True if each callback function returns true, false otherwise. 
+ */ + virtual bool + iterateTransactionSQLsBack( + std::optional maxShardIndex, + std::function const& + callback) = 0; + /** @return The maximum number of ledgers stored in a shard */ virtual std::uint32_t diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.cpp b/src/ripple/nodestore/impl/DatabaseShardImp.cpp index a56d9e1ce6..c3e337ef7f 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseShardImp.cpp @@ -1,4 +1,4 @@ -//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled Copyright (c) 2012, 2017 Ripple Labs Inc. @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -37,6 +38,7 @@ #endif namespace ripple { + namespace NodeStore { DatabaseShardImp::DatabaseShardImp( @@ -752,14 +754,27 @@ DatabaseShardImp::import(Database& source) auto loadLedger = [&](bool ascendSort = true) -> std::optional { std::shared_ptr ledger; - std::uint32_t ledgerSeq; - std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper( - "WHERE LedgerSeq >= " + - std::to_string(earliestLedgerSeq()) + - " order by LedgerSeq " + (ascendSort ? "asc" : "desc") + - " limit 1", - app_, - false); + std::uint32_t ledgerSeq{0}; + std::optional info; + if (ascendSort) + { + info = + dynamic_cast( + &app_.getRelationalDBInterface()) + ->getLimitedOldestLedgerInfo(earliestLedgerSeq()); + } + else + { + info = + dynamic_cast( + &app_.getRelationalDBInterface()) + ->getLimitedNewestLedgerInfo(earliestLedgerSeq()); + } + if (info) + { + ledger = loadLedgerHelper(*info, app_, false); + ledgerSeq = info->seq; + } if (!ledger || ledgerSeq == 0) { JLOG(j_.error()) << "no suitable ledgers were found in" @@ -844,14 +859,16 @@ DatabaseShardImp::import(Database& source) auto const numLedgers{ shardIndex == earliestShardIndex() ? 
lastSeq - firstSeq + 1 : ledgersPerShard_}; - auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)}; + auto ledgerHashes{ + app_.getRelationalDBInterface().getHashesByIndex( + firstSeq, lastSeq)}; if (ledgerHashes.size() != numLedgers) continue; bool valid{true}; for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256) { - if (!source.fetchNodeObject(ledgerHashes[n].first, n)) + if (!source.fetchNodeObject(ledgerHashes[n].ledgerHash, n)) { JLOG(j_.warn()) << "SQLite ledger sequence " << n << " mismatches node store"; @@ -1887,6 +1904,144 @@ DatabaseShardImp::checkHistoricalPaths() const return true; } +bool +DatabaseShardImp::callForLedgerSQL( + LedgerIndex ledgerSeq, + std::function const& + callback) +{ + std::lock_guard lock(mutex_); + auto shardIndex = seqToShardIndex(ledgerSeq); + + if (shards_.count(shardIndex) && + shards_[shardIndex]->getState() == Shard::State::final) + { + return shards_[shardIndex]->callForLedgerSQL(callback); + } + + return false; +} + +bool +DatabaseShardImp::callForTransactionSQL( + LedgerIndex ledgerSeq, + std::function const& + callback) +{ + std::lock_guard lock(mutex_); + auto shardIndex = seqToShardIndex(ledgerSeq); + + if (shards_.count(shardIndex) && + shards_[shardIndex]->getState() == Shard::State::final) + { + return shards_[shardIndex]->callForTransactionSQL(callback); + } + + return false; +} + +bool +DatabaseShardImp::iterateShardsForward( + std::optional minShardIndex, + std::function const& visit) +{ + std::lock_guard lock(mutex_); + + std::map>::iterator it, eit; + + if (!minShardIndex) + it = shards_.begin(); + else + it = shards_.lower_bound(*minShardIndex); + + eit = shards_.end(); + + for (; it != eit; it++) + { + if (it->second->getState() == Shard::State::final) + { + if (!visit(*it->second)) + return false; + } + } + + return true; +} +bool +DatabaseShardImp::iterateLedgerSQLsForward( + std::optional minShardIndex, + std::function const& + callback) +{ + return iterateShardsForward( + minShardIndex, [&callback](Shard& shard) -> bool { + return shard.callForLedgerSQL(callback); + }); +} + +bool +DatabaseShardImp::iterateTransactionSQLsForward( + std::optional minShardIndex, + std::function const& + callback) +{ + return iterateShardsForward( + minShardIndex, [&callback](Shard& shard) -> bool { + return shard.callForTransactionSQL(callback); + }); +} + +bool +DatabaseShardImp::iterateShardsBack( + std::optional maxShardIndex, + std::function const& visit) +{ + std::lock_guard lock(mutex_); + + std::map>::reverse_iterator it, eit; + + if (!maxShardIndex) + it = shards_.rbegin(); + else + it = std::make_reverse_iterator(shards_.upper_bound(*maxShardIndex)); + + eit = shards_.rend(); + + for (; it != eit; it++) + { + if (it->second->getState() == Shard::State::final && + (!maxShardIndex || it->first <= *maxShardIndex)) + { + if (!visit(*it->second)) + return false; + } + } + + return true; +} + +bool +DatabaseShardImp::iterateLedgerSQLsBack( + std::optional maxShardIndex, + std::function const& + callback) +{ + return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool { + return shard.callForLedgerSQL(callback); + }); +} + +bool +DatabaseShardImp::iterateTransactionSQLsBack( + std::optional maxShardIndex, + std::function const& + callback) +{ + return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool { + return shard.callForTransactionSQL(callback); + }); +} + //------------------------------------------------------------------------------ std::unique_ptr diff --git 
a/src/ripple/nodestore/impl/DatabaseShardImp.h b/src/ripple/nodestore/impl/DatabaseShardImp.h index 888fcfbe47..3d5028c51e 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.h +++ b/src/ripple/nodestore/impl/DatabaseShardImp.h @@ -161,6 +161,42 @@ public: void sweep() override; + bool + callForLedgerSQL( + LedgerIndex ledgerSeq, + std::function const& + callback) override; + + bool + callForTransactionSQL( + LedgerIndex ledgerSeq, + std::function const& + callback) override; + + bool + iterateLedgerSQLsForward( + std::optional minShardIndex, + std::function const& + callback) override; + + bool + iterateTransactionSQLsForward( + std::optional minShardIndex, + std::function const& + callback) override; + + bool + iterateLedgerSQLsBack( + std::optional maxShardIndex, + std::function const& + callback) override; + + bool + iterateTransactionSQLsBack( + std::optional maxShardIndex, + std::function const& + callback) override; + private: enum class PathDesignation : uint8_t { none, // No path specified @@ -179,7 +215,7 @@ private: std::unique_ptr taskQueue_; // Shards held by this server - std::unordered_map> shards_; + std::map> shards_; // Shard indexes being imported std::set preparedIndexes_; @@ -327,6 +363,34 @@ private: bool checkHistoricalPaths() const; + + /** + * @brief iterateShardsForward Visits all shards starting from given + * in ascending order and calls given callback function to each + * of them passing shard as parameter. + * @param minShardIndex Start shard index to visit or none if all shards + * should be visited. + * @param visit Callback function to call. + * @return True if each callback function returned true, false otherwise. + */ + bool + iterateShardsForward( + std::optional minShardIndex, + std::function const& visit); + + /** + * @brief iterateShardsBack Visits all shards starting from given + * in descending order and calls given callback function to each + * of them passing shard as parameter. + * @param maxShardIndex Start shard index to visit or none if all shards + * should be visited. + * @param visit Callback function to call. + * @return True if each callback function returned true, false otherwise. + */ + bool + iterateShardsBack( + std::optional maxShardIndex, + std::function const& visit); }; } // namespace NodeStore diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index e9bce3a7ca..378af695e5 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -19,6 +19,8 @@ #include #include +#include +#include #include #include #include @@ -617,36 +619,23 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) if (!acquireInfo_) return fail("missing acquire SQLite database"); - // index and sHash must be boost::optional (not std) because that's - // what SOCI expects in its interface. 
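For reference, a hedged sketch of how client code might drive the shard-iteration interface added to DatabaseShard above; the counting query and the function name are illustrative, and the callback parameter types follow the "shard index and session" description in the doc comments.

#include <ripple/nodestore/DatabaseShard.h>
#include <soci/soci.h>
#include <cstdint>
#include <optional>

// Sketch only: walk every finalized shard in ascending order and total the
// rows in each shard's Ledgers table. Returning true from the callback keeps
// the iteration going; returning false would stop it early.
long long
countShardLedgers(ripple::NodeStore::DatabaseShard& shardStore)
{
    long long total = 0;
    shardStore.iterateLedgerSQLsForward(
        std::nullopt,  // no minimum shard index: visit all finalized shards
        [&total](soci::session& session, std::uint32_t shardIndex) -> bool {
            long long rows = 0;
            session << "SELECT COUNT(*) FROM Ledgers;", soci::into(rows);
            total += rows;
            return true;
        });
    return total;
}
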
- auto session{acquireInfo_->SQLiteDB->checkoutDb()}; - boost::optional index; - boost::optional sHash; - soci::blob sociBlob(*session); - soci::indicator blobPresent; - *session << "SELECT ShardIndex, LastLedgerHash, StoredLedgerSeqs " - "FROM Shard " - "WHERE ShardIndex = :index;", - soci::into(index), soci::into(sHash), - soci::into(sociBlob, blobPresent), soci::use(index_); + auto [res, seqshash] = selectAcquireDBLedgerSeqsHash( + *acquireInfo_->SQLiteDB->checkoutDb(), index_); - if (!index || index != index_) + if (!res) return fail("missing or invalid ShardIndex"); - if (!sHash) + if (!seqshash.hash) return fail("missing LastLedgerHash"); - if (!hash.parseHex(*sHash) || hash.isZero()) + if (!hash.parseHex(*seqshash.hash) || hash.isZero()) return fail("invalid LastLedgerHash"); - if (blobPresent != soci::i_ok) + if (!seqshash.sequences) return fail("missing StoredLedgerSeqs"); - std::string s; - convert(sociBlob, s); - auto& storedSeqs{acquireInfo_->storedSeqs}; - if (!from_string(storedSeqs, s) || + if (!from_string(storedSeqs, *seqshash.sequences) || boost::icl::first(storedSeqs) != firstSeq_ || boost::icl::last(storedSeqs) != lastSeq_ || storedSeqs.size() != maxLedgers_) @@ -871,11 +860,8 @@ Shard::open(std::lock_guard const& lock) setup.dataDir = dir_; setup.useGlobalPragma = true; - acquireInfo_->SQLiteDB = std::make_unique( + acquireInfo_->SQLiteDB = makeAcquireDB( setup, - AcquireShardDBName, - AcquireShardDBPragma, - AcquireShardDBInit, DatabaseCon::CheckpointerSetup{&app_.getJobQueue(), &app_.logs()}); state_ = acquire; }; @@ -890,37 +876,22 @@ Shard::open(std::lock_guard const& lock) { // A new shard createAcquireInfo(); - acquireInfo_->SQLiteDB->getSession() - << "INSERT INTO Shard (ShardIndex) " - "VALUES (:shardIndex);", - soci::use(index_); + insertAcquireDBIndex(acquireInfo_->SQLiteDB->getSession(), index_); } else if (exists(dir_ / AcquireShardDBName)) { // A shard being acquired, backend is likely incomplete createAcquireInfo(); + auto [res, s] = selectAcquireDBLedgerSeqs( + acquireInfo_->SQLiteDB->getSession(), index_); - // index and must be boost::optional (not std) because that's - // what SOCI expects in its interface. 
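The acquire-database helpers used here (makeAcquireDB, insertAcquireDBIndex, selectAcquireDBLedgerSeqs, and selectAcquireDBLedgerSeqsHash from the earlier hunk) compose roughly as sketched below; the setup objects and shard index are assumed to be supplied by the caller, as Shard does, and the include path is assumed.

#include <ripple/app/rdb/RelationalDBInterface_shards.h>  // assumed header location
#include <cstdint>

// Sketch only: create an acquire DB for a shard, register its index, and read
// back the stored-ledger bookkeeping later.
void
acquireDbSketch(
    ripple::DatabaseCon::Setup const& setup,
    ripple::DatabaseCon::CheckpointerSetup const& checkpointerSetup,
    std::uint32_t shardIndex)
{
    using namespace ripple;

    auto acquireDB = makeAcquireDB(setup, checkpointerSetup);
    insertAcquireDBIndex(acquireDB->getSession(), shardIndex);

    // Later: res is false if the row for shardIndex is missing or invalid,
    // and seqs stays empty until ledgers have been stored.
    auto [res, seqs] =
        selectAcquireDBLedgerSeqs(acquireDB->getSession(), shardIndex);
    if (res && seqs)
    {
        // ... parse *seqs with from_string(), as Shard::open does ...
    }
}
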
- auto& session{acquireInfo_->SQLiteDB->getSession()}; - boost::optional index; - soci::blob sociBlob(session); - soci::indicator blobPresent; - - session << "SELECT ShardIndex, StoredLedgerSeqs " - "FROM Shard " - "WHERE ShardIndex = :index;", - soci::into(index), soci::into(sociBlob, blobPresent), - soci::use(index_); - - if (!index || index != index_) + if (!res) return fail("invalid acquire SQLite database"); - if (blobPresent == soci::i_ok) + if (s) { - std::string s; auto& storedSeqs{acquireInfo_->storedSeqs}; - if (convert(sociBlob, s); !from_string(storedSeqs, s)) + if (!from_string(storedSeqs, *s)) return fail("invalid StoredLedgerSeqs"); if (boost::icl::first(storedSeqs) < firstSeq_ || @@ -1003,44 +974,19 @@ Shard::initSQLite(std::lock_guard const&) if (state_ == final) { - lgrSQLiteDB_ = std::make_unique( - setup, LgrDBName, FinalShardDBPragma, LgrDBInit); - lgrSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes( - config.getValueFor(SizedItem::lgrDBCache, std::nullopt))); - - txSQLiteDB_ = std::make_unique( - setup, TxDBName, FinalShardDBPragma, TxDBInit); - txSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes( - config.getValueFor(SizedItem::txnDBCache, std::nullopt))); + auto [lgr, tx] = makeShardCompleteLedgerDBs(config, setup); + txSQLiteDB_ = std::move(tx); + lgrSQLiteDB_ = std::move(lgr); } else { - // Non final shards use a Write Ahead Log for performance - lgrSQLiteDB_ = std::make_unique( + auto [lgr, tx] = makeShardIncompleteLedgerDBs( + config, setup, - LgrDBName, - LgrDBPragma, - LgrDBInit, DatabaseCon::CheckpointerSetup{ &app_.getJobQueue(), &app_.logs()}); - lgrSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::lgrDBCache))); - - txSQLiteDB_ = std::make_unique( - setup, - TxDBName, - TxDBPragma, - TxDBInit, - DatabaseCon::CheckpointerSetup{ - &app_.getJobQueue(), &app_.logs()}); - txSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::txnDBCache))); + txSQLiteDB_ = std::move(tx); + lgrSQLiteDB_ = std::move(lgr); } } catch (std::exception const& e) @@ -1060,127 +1006,32 @@ Shard::storeSQLite(std::shared_ptr const& ledger) if (stop_) return false; - auto const ledgerSeq{ledger->info().seq}; - try { - // Update the transactions database + auto res = updateLedgerDBs( + *txSQLiteDB_->checkoutDb(), + *lgrSQLiteDB_->checkoutDb(), + ledger, + index_, + stop_, + j_); + + if (!res) + return false; + + // Update the acquire database if present + if (acquireInfo_) { - auto session{txSQLiteDB_->checkoutDb()}; - soci::transaction tr(*session); + std::optional s; + if (!acquireInfo_->storedSeqs.empty()) + s = to_string(acquireInfo_->storedSeqs); - *session << "DELETE FROM Transactions " - "WHERE LedgerSeq = :seq;", - soci::use(ledgerSeq); - *session << "DELETE FROM AccountTransactions " - "WHERE LedgerSeq = :seq;", - soci::use(ledgerSeq); - - if (ledger->info().txHash.isNonZero()) - { - auto const sSeq{std::to_string(ledgerSeq)}; - if (!ledger->txMap().isValid()) - { - JLOG(j_.error()) << "shard " << index_ - << " has an invalid transaction map" - << " on sequence " << sSeq; - return false; - } - - for (auto const& item : ledger->txs) - { - if (stop_) - return false; - - auto const txID{item.first->getTransactionID()}; - auto const sTxID{to_string(txID)}; - auto const txMeta{std::make_shared( - txID, ledger->seq(), *item.second)}; - - *session << 
"DELETE FROM AccountTransactions " - "WHERE TransID = :txID;", - soci::use(sTxID); - - auto const& accounts = txMeta->getAffectedAccounts(j_); - if (!accounts.empty()) - { - auto const sTxnSeq{std::to_string(txMeta->getIndex())}; - auto const s{boost::str( - boost::format("('%s','%s',%s,%s)") % sTxID % "%s" % - sSeq % sTxnSeq)}; - std::string sql; - sql.reserve((accounts.size() + 1) * 128); - sql = - "INSERT INTO AccountTransactions " - "(TransID, Account, LedgerSeq, TxnSeq) VALUES "; - sql += boost::algorithm::join( - accounts | - boost::adaptors::transformed( - [&](AccountID const& accountID) { - return boost::str( - boost::format(s) % - ripple::toBase58(accountID)); - }), - ","); - sql += ';'; - *session << sql; - - JLOG(j_.trace()) << "shard " << index_ - << " account transaction: " << sql; - } - else - { - JLOG(j_.warn()) - << "shard " << index_ << " transaction in ledger " - << sSeq << " affects no accounts"; - } - - Serializer s; - item.second->add(s); - *session - << (STTx::getMetaSQLInsertReplaceHeader() + - item.first->getMetaSQL( - ledgerSeq, sqlBlobLiteral(s.modData())) + - ';'); - } - } - - tr.commit(); - } - - // Update the ledger database - { - auto const sParentHash{to_string(ledger->info().parentHash)}; - auto const sDrops{to_string(ledger->info().drops)}; - auto const sAccountHash{to_string(ledger->info().accountHash)}; - auto const sTxHash{to_string(ledger->info().txHash)}; - auto const sHash{to_string(ledger->info().hash)}; - - auto session{lgrSQLiteDB_->checkoutDb()}; - soci::transaction tr(*session); - - *session << "DELETE FROM Ledgers " - "WHERE LedgerSeq = :seq;", - soci::use(ledgerSeq); - *session - << "INSERT OR REPLACE INTO Ledgers (" - "LedgerHash, LedgerSeq, PrevHash, TotalCoins, ClosingTime," - "PrevClosingTime, CloseTimeRes, CloseFlags, AccountSetHash," - "TransSetHash)" - "VALUES (" - ":ledgerHash, :ledgerSeq, :prevHash, :totalCoins," - ":closingTime, :prevClosingTime, :closeTimeRes," - ":closeFlags, :accountSetHash, :transSetHash);", - soci::use(sHash), soci::use(ledgerSeq), soci::use(sParentHash), - soci::use(sDrops), - soci::use(ledger->info().closeTime.time_since_epoch().count()), - soci::use( - ledger->info().parentCloseTime.time_since_epoch().count()), - soci::use(ledger->info().closeTimeResolution.count()), - soci::use(ledger->info().closeFlags), soci::use(sAccountHash), - soci::use(sTxHash); - - tr.commit(); + updateAcquireDB( + acquireInfo_->SQLiteDB->getSession(), + ledger, + index_, + lastSeq_, + s); } } catch (std::exception const& e) @@ -1366,5 +1217,31 @@ Shard::makeBackendCount() return Shard::Count(&backendCount_); } +bool +Shard::callForLedgerSQL( + std::function const& + callback) +{ + auto const scopedCount{makeBackendCount()}; + if (!scopedCount) + return false; + + auto db = lgrSQLiteDB_->checkoutDb(); + return callback(*db, index_); +} + +bool +Shard::callForTransactionSQL( + std::function const& + callback) +{ + auto const scopedCount{makeBackendCount()}; + if (!scopedCount) + return false; + + auto db = txSQLiteDB_->checkoutDb(); + return callback(*db, index_); +} + } // namespace NodeStore } // namespace ripple diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index 574e912b49..d21d75266b 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ b/src/ripple/nodestore/impl/Shard.h @@ -21,6 +21,7 @@ #define RIPPLE_NODESTORE_SHARD_H_INCLUDED #include +#include #include #include #include @@ -196,6 +197,30 @@ public: removeOnDestroy_ = true; } + /** + * @brief callForLedgerSQL Checks out ledger database for the 
shard and + * calls given callback function passing shard index and session + * with the database to it. + * @param callback Callback function to call. + * @return Value returned by callback function. + */ + bool + callForLedgerSQL( + std::function const& + callback); + + /** + * @brief callForTransactionSQL Checks out transaction database for the + * shard and calls given callback function passing shard index and + * session with the database to it. + * @param callback Callback function to call. + * @return Value returned by callback function. + */ + bool + callForTransactionSQL( + std::function const& + callback); + // Current shard version static constexpr std::uint32_t version{2}; diff --git a/src/ripple/overlay/impl/OverlayImpl.cpp b/src/ripple/overlay/impl/OverlayImpl.cpp index 451bccf189..aecb516b17 100644 --- a/src/ripple/overlay/impl/OverlayImpl.cpp +++ b/src/ripple/overlay/impl/OverlayImpl.cpp @@ -22,10 +22,11 @@ #include #include #include +#include +#include #include #include #include -#include #include #include #include @@ -689,15 +690,7 @@ OverlayImpl::onManifests( if (app_.validators().listed(mo->masterKey)) { auto db = app_.getWalletDB().checkoutDb(); - - soci::transaction tr(*db); - static const char* const sql = - "INSERT INTO ValidatorManifests (RawData) VALUES " - "(:rawData);"; - soci::blob rawData(*db); - convert(serialized, rawData); - *db << sql, soci::use(rawData); - tr.commit(); + addValidatorManifest(*db, serialized); } } } diff --git a/src/ripple/overlay/impl/PeerReservationTable.cpp b/src/ripple/overlay/impl/PeerReservationTable.cpp index c391fe8630..6e88da123f 100644 --- a/src/ripple/overlay/impl/PeerReservationTable.cpp +++ b/src/ripple/overlay/impl/PeerReservationTable.cpp @@ -19,8 +19,9 @@ #include +#include +#include #include -#include #include #include #include @@ -72,36 +73,9 @@ PeerReservationTable::load(DatabaseCon& connection) std::lock_guard lock(mutex_); connection_ = &connection; - auto db = connection_->checkoutDb(); - - // These values must be boost::optionals (not std) because SOCI expects - // boost::optionals. - boost::optional valPubKey, valDesc; - // We should really abstract the table and column names into constants, - // but no one else does. Because it is too tedious? It would be easy if we - // had a jOOQ for C++. - soci::statement st = - (db->prepare << "SELECT PublicKey, Description FROM PeerReservations;", - soci::into(valPubKey), - soci::into(valDesc)); - st.execute(); - while (st.fetch()) - { - if (!valPubKey || !valDesc) - { - // This represents a `NULL` in a `NOT NULL` column. It should be - // unreachable. 
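The wallet-DB helpers introduced in these hunks (addValidatorManifest, getPeerReservationTable, insertPeerReservation, deletePeerReservation) take a checked-out soci::session, so they can be used outside PeerReservationTable in the same way. An illustrative sketch, with a hypothetical description string and function name:

#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDBInterface_global.h>  // assumed header location
#include <ripple/protocol/PublicKey.h>

// Sketch only: add and later remove a peer reservation through the new
// helpers rather than inline SQL against the wallet database.
void
reservationSketch(ripple::Application& app, ripple::PublicKey const& nodeId)
{
    auto db = app.getWalletDB().checkoutDb();

    // The description is an arbitrary illustrative value.
    ripple::insertPeerReservation(*db, nodeId, "temporary test reservation");

    // ... later ...
    ripple::deletePeerReservation(*db, nodeId);
}
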
- continue; - } - auto const optNodeId = - parseBase58(TokenType::NodePublic, *valPubKey); - if (!optNodeId) - { - JLOG(journal_.warn()) << "load: not a public key: " << valPubKey; - continue; - } - table_.insert(PeerReservation{*optNodeId, *valDesc}); - } + auto db = connection.checkoutDb(); + auto table = getPeerReservationTable(*db, journal_); + table_.insert(table.begin(), table.end()); return true; } @@ -135,12 +109,7 @@ PeerReservationTable::insert_or_assign(PeerReservation const& reservation) table_.insert(hint, reservation); auto db = connection_->checkoutDb(); - *db << "INSERT INTO PeerReservations (PublicKey, Description) " - "VALUES (:nodeId, :desc) " - "ON CONFLICT (PublicKey) DO UPDATE SET " - "Description=excluded.Description", - soci::use(toBase58(TokenType::NodePublic, reservation.nodeId)), - soci::use(reservation.description); + insertPeerReservation(*db, reservation.nodeId, reservation.description); return previous; } @@ -158,8 +127,7 @@ PeerReservationTable::erase(PublicKey const& nodeId) previous = *it; table_.erase(it); auto db = connection_->checkoutDb(); - *db << "DELETE FROM PeerReservations WHERE PublicKey = :nodeId", - soci::use(toBase58(TokenType::NodePublic, nodeId)); + deletePeerReservation(*db, nodeId); } return previous; diff --git a/src/ripple/peerfinder/impl/PeerfinderManager.cpp b/src/ripple/peerfinder/impl/PeerfinderManager.cpp index 7da7e17140..ed41c86520 100644 --- a/src/ripple/peerfinder/impl/PeerfinderManager.cpp +++ b/src/ripple/peerfinder/impl/PeerfinderManager.cpp @@ -17,7 +17,6 @@ */ //============================================================================== -#include #include #include #include @@ -42,7 +41,7 @@ public: StoreSqdb m_store; Checker checker_; Logic m_logic; - SociConfig m_sociConfig; + BasicConfig const& m_config; //-------------------------------------------------------------------------- @@ -61,7 +60,7 @@ public: , m_store(journal) , checker_(io_service_) , m_logic(clock, m_store, checker_, journal) - , m_sociConfig(config, "peerfinder") + , m_config(config) , m_stats(std::bind(&ManagerImp::collect_metrics, this), collector) { } @@ -223,7 +222,7 @@ public: void onPrepare() override { - m_store.open(m_sociConfig); + m_store.open(m_config); m_logic.load(); } diff --git a/src/ripple/peerfinder/impl/StoreSqdb.h b/src/ripple/peerfinder/impl/StoreSqdb.h index 9319b95aa1..c03191ccdf 100644 --- a/src/ripple/peerfinder/impl/StoreSqdb.h +++ b/src/ripple/peerfinder/impl/StoreSqdb.h @@ -20,8 +20,8 @@ #ifndef RIPPLE_PEERFINDER_STORESQDB_H_INCLUDED #define RIPPLE_PEERFINDER_STORESQDB_H_INCLUDED +#include #include -#include #include namespace ripple { @@ -32,7 +32,7 @@ class StoreSqdb : public Store { private: beast::Journal m_journal; - soci::session m_session; + soci::session m_sqlDb; public: enum { @@ -51,14 +51,9 @@ public: } void - open(SociConfig const& sociConfig) + open(BasicConfig const& config) { - sociConfig.open(m_session); - - JLOG(m_journal.info()) - << "Opening database at '" << sociConfig.connectionString() << "'"; - - init(); + init(config); update(); } @@ -68,19 +63,8 @@ public: load(load_callback const& cb) override { std::size_t n(0); - std::string s; - int valence; - soci::statement st = - (m_session.prepare << "SELECT " - " address, " - " valence " - "FROM PeerFinder_BootstrapCache;", - soci::into(s), - soci::into(valence)); - st.execute(); - while (st.fetch()) - { + readPeerFinderDB(m_sqlDb, [&](std::string const& s, int valence) { beast::IP::Endpoint const endpoint( beast::IP::Endpoint::from_string(s)); @@ -94,7 +78,8 
@@ public: JLOG(m_journal.error()) << "Bad address string '" << s << "' in Bootcache table"; } - } + }); + return n; } @@ -103,32 +88,7 @@ public: void save(std::vector const& v) override { - soci::transaction tr(m_session); - m_session << "DELETE FROM PeerFinder_BootstrapCache;"; - - if (!v.empty()) - { - std::vector s; - std::vector valence; - s.reserve(v.size()); - valence.reserve(v.size()); - - for (auto const& e : v) - { - s.emplace_back(to_string(e.endpoint)); - valence.emplace_back(e.valence); - } - - m_session << "INSERT INTO PeerFinder_BootstrapCache ( " - " address, " - " valence " - ") VALUES ( " - " :s, :valence " - ");", - soci::use(s), soci::use(valence); - } - - tr.commit(); + savePeerFinderDB(m_sqlDb, v); } // Convert any existing entries from an older schema to the @@ -136,185 +96,14 @@ public: void update() { - soci::transaction tr(m_session); - // get version - int version(0); - { - // SOCI requires a boost::optional (not std::optional) parameter. - boost::optional vO; - m_session << "SELECT " - " version " - "FROM SchemaVersion WHERE " - " name = 'PeerFinder';", - soci::into(vO); - - version = vO.value_or(0); - - JLOG(m_journal.info()) - << "Opened version " << version << " database"; - } - - { - if (version < currentSchemaVersion) - { - JLOG(m_journal.info()) - << "Updating database to version " << currentSchemaVersion; - } - else if (version > currentSchemaVersion) - { - Throw( - "The PeerFinder database version is higher than expected"); - } - } - - if (version < 4) - { - // - // Remove the "uptime" column from the bootstrap table - // - - m_session << "CREATE TABLE IF NOT EXISTS " - "PeerFinder_BootstrapCache_Next ( " - " id INTEGER PRIMARY KEY AUTOINCREMENT, " - " address TEXT UNIQUE NOT NULL, " - " valence INTEGER" - ");"; - - m_session << "CREATE INDEX IF NOT EXISTS " - " PeerFinder_BootstrapCache_Next_Index ON " - " PeerFinder_BootstrapCache_Next " - " ( address ); "; - - std::size_t count; - m_session << "SELECT COUNT(*) FROM PeerFinder_BootstrapCache;", - soci::into(count); - - std::vector list; - - { - list.reserve(count); - std::string s; - int valence; - soci::statement st = - (m_session.prepare << "SELECT " - " address, " - " valence " - "FROM PeerFinder_BootstrapCache;", - soci::into(s), - soci::into(valence)); - - st.execute(); - while (st.fetch()) - { - Store::Entry entry; - entry.endpoint = beast::IP::Endpoint::from_string(s); - if (!is_unspecified(entry.endpoint)) - { - entry.valence = valence; - list.push_back(entry); - } - else - { - JLOG(m_journal.error()) << "Bad address string '" << s - << "' in Bootcache table"; - } - } - } - - if (!list.empty()) - { - std::vector s; - std::vector valence; - s.reserve(list.size()); - valence.reserve(list.size()); - - for (auto iter(list.cbegin()); iter != list.cend(); ++iter) - { - s.emplace_back(to_string(iter->endpoint)); - valence.emplace_back(iter->valence); - } - - m_session << "INSERT INTO PeerFinder_BootstrapCache_Next ( " - " address, " - " valence " - ") VALUES ( " - " :s, :valence" - ");", - soci::use(s), soci::use(valence); - } - - m_session << "DROP TABLE IF EXISTS PeerFinder_BootstrapCache;"; - - m_session - << "DROP INDEX IF EXISTS PeerFinder_BootstrapCache_Index;"; - - m_session << "ALTER TABLE PeerFinder_BootstrapCache_Next " - " RENAME TO PeerFinder_BootstrapCache;"; - - m_session << "CREATE INDEX IF NOT EXISTS " - " PeerFinder_BootstrapCache_Index ON " - "PeerFinder_BootstrapCache " - " ( " - " address " - " ); "; - } - - if (version < 3) - { - // - // Remove legacy endpoints from the schema - 
// - - m_session << "DROP TABLE IF EXISTS LegacyEndpoints;"; - - m_session << "DROP TABLE IF EXISTS PeerFinderLegacyEndpoints;"; - - m_session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints;"; - - m_session - << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints_Index;"; - } - - { - int const v(currentSchemaVersion); - m_session << "INSERT OR REPLACE INTO SchemaVersion (" - " name " - " ,version " - ") VALUES ( " - " 'PeerFinder', :version " - ");", - soci::use(v); - } - - tr.commit(); + updatePeerFinderDB(m_sqlDb, currentSchemaVersion, m_journal); } private: void - init() + init(BasicConfig const& config) { - soci::transaction tr(m_session); - m_session << "PRAGMA encoding=\"UTF-8\";"; - - m_session << "CREATE TABLE IF NOT EXISTS SchemaVersion ( " - " name TEXT PRIMARY KEY, " - " version INTEGER" - ");"; - - m_session << "CREATE TABLE IF NOT EXISTS PeerFinder_BootstrapCache ( " - " id INTEGER PRIMARY KEY AUTOINCREMENT, " - " address TEXT UNIQUE NOT NULL, " - " valence INTEGER" - ");"; - - m_session << "CREATE INDEX IF NOT EXISTS " - " PeerFinder_BootstrapCache_Index ON " - "PeerFinder_BootstrapCache " - " ( " - " address " - " ); "; - - tr.commit(); + initPeerFinderDB(m_sqlDb, config, m_journal); } }; diff --git a/src/ripple/rpc/ShardArchiveHandler.h b/src/ripple/rpc/ShardArchiveHandler.h index 2e2133bc24..500dee20f4 100644 --- a/src/ripple/rpc/ShardArchiveHandler.h +++ b/src/ripple/rpc/ShardArchiveHandler.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -127,13 +128,13 @@ private: ///////////////////////////////////////////////// // m_ is used to protect access to downloader_, // archives_, process_ and to protect setting and - // destroying sqliteDB_. + // destroying sqlDB_. ///////////////////////////////////////////////// std::mutex mutable m_; std::shared_ptr downloader_; std::map archives_; bool process_; - std::unique_ptr sqliteDB_; + std::unique_ptr sqlDB_; ///////////////////////////////////////////////// Application& app_; diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index 4284cab0d5..7dae496e95 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include #include @@ -40,42 +42,14 @@ namespace ripple { -using LedgerSequence = uint32_t; -using LedgerHash = uint256; -using LedgerShortcut = RPC::LedgerShortcut; +using TxnsData = RelationalDBInterface::AccountTxs; +using TxnsDataBinary = RelationalDBInterface::MetaTxsList; +using TxnDataBinary = RelationalDBInterface::txnMetaLedgerType; +using AccountTxArgs = RelationalDBInterface::AccountTxArgs; +using AccountTxResult = RelationalDBInterface::AccountTxResult; -using AccountTxMarker = NetworkOPs::AccountTxMarker; - -struct LedgerRange -{ - uint32_t min; - uint32_t max; -}; - -using LedgerSpecifier = - std::variant; - -struct AccountTxArgs -{ - AccountID account; - std::optional ledger; - bool binary = false; - bool forward = false; - uint32_t limit = 0; - std::optional marker; -}; - -using TxnsData = NetworkOPs::AccountTxs; -using TxnsDataBinary = NetworkOPs::MetaTxsList; -using TxnDataBinary = NetworkOPs::txnMetaLedgerType; - -struct AccountTxResult -{ - std::variant transactions; - LedgerRange ledgerRange; - uint32_t limit; - std::optional marker; -}; +using LedgerShortcut = RelationalDBInterface::LedgerShortcut; +using LedgerSpecifier = RelationalDBInterface::LedgerSpecifier; // parses args into a ledger specifier, or returns a grpc 
status object on error std::variant, grpc::Status> @@ -275,261 +249,14 @@ getLedgerRange( return LedgerRange{uLedgerMin, uLedgerMax}; } -enum class DataFormat { binary, expanded }; -std::variant -flatFetchTransactions( - RPC::Context& context, - std::vector& nodestoreHashes, - std::vector& ledgerSequences, - DataFormat format) -{ - std::variant ret; - if (format == DataFormat::binary) - ret = TxnsDataBinary(); - else - ret = TxnsData(); - - std::vector< - std::pair, std::shared_ptr>> - txns = flatFetchTransactions(context.app, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) - { - auto& [txn, meta] = txns[i]; - if (format == DataFormat::binary) - { - auto& transactions = std::get(ret); - Serializer txnSer = txn->getSerializer(); - Serializer metaSer = meta->getSerializer(); - // SerialIter it(item->slice()); - Blob txnBlob = txnSer.getData(); - Blob metaBlob = metaSer.getData(); - transactions.push_back( - std::make_tuple(txnBlob, metaBlob, ledgerSequences[i])); - } - else - { - auto& transactions = std::get(ret); - std::string reason; - auto txnRet = - std::make_shared(txn, reason, context.app); - txnRet->setLedger(ledgerSequences[i]); - txnRet->setStatus(COMMITTED); - auto txMeta = std::make_shared( - txnRet->getID(), ledgerSequences[i], *meta); - transactions.push_back(std::make_pair(txnRet, txMeta)); - } - } - return ret; -} - -std::pair -processAccountTxStoredProcedureResult( - AccountTxArgs const& args, - Json::Value& result, - RPC::Context& context) -{ - AccountTxResult ret; - ret.limit = args.limit; - - try - { - if (result.isMember("transactions")) - { - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (auto& t : result["transactions"]) - { - if (t.isMember("ledger_seq") && t.isMember("nodestore_hash")) - { - uint32_t ledgerSequence = t["ledger_seq"].asUInt(); - std::string nodestoreHashHex = - t["nodestore_hash"].asString(); - nodestoreHashHex.erase(0, 2); - uint256 nodestoreHash; - if (!nodestoreHash.parseHex(nodestoreHashHex)) - assert(false); - - if (nodestoreHash.isNonZero()) - { - ledgerSequences.push_back(ledgerSequence); - nodestoreHashes.push_back(nodestoreHash); - } - else - { - assert(false); - return {ret, {rpcINTERNAL, "nodestoreHash is zero"}}; - } - } - else - { - assert(false); - return {ret, {rpcINTERNAL, "missing postgres fields"}}; - } - } - - assert(nodestoreHashes.size() == ledgerSequences.size()); - ret.transactions = flatFetchTransactions( - context, - nodestoreHashes, - ledgerSequences, - args.binary ? 
DataFormat::binary : DataFormat::expanded); - - JLOG(context.j.trace()) << __func__ << " : processed db results"; - - if (result.isMember("marker")) - { - auto& marker = result["marker"]; - assert(marker.isMember("ledger")); - assert(marker.isMember("seq")); - ret.marker = { - marker["ledger"].asUInt(), marker["seq"].asUInt()}; - } - assert(result.isMember("ledger_index_min")); - assert(result.isMember("ledger_index_max")); - ret.ledgerRange = { - result["ledger_index_min"].asUInt(), - result["ledger_index_max"].asUInt()}; - return {ret, rpcSUCCESS}; - } - else if (result.isMember("error")) - { - JLOG(context.j.debug()) - << __func__ << " : error = " << result["error"].asString(); - return { - ret, - RPC::Status{rpcINVALID_PARAMS, result["error"].asString()}}; - } - else - { - return {ret, {rpcINTERNAL, "unexpected Postgres response"}}; - } - } - catch (std::exception& e) - { - JLOG(context.j.debug()) << __func__ << " : " - << "Caught exception : " << e.what(); - return {ret, {rpcINTERNAL, e.what()}}; - } -} - -std::pair -doAccountTxStoredProcedure(AccountTxArgs const& args, RPC::Context& context) -{ -#ifdef RIPPLED_REPORTING - pg_params dbParams; - - char const*& command = dbParams.first; - std::vector>& values = dbParams.second; - command = - "SELECT account_tx($1::bytea, $2::bool, " - "$3::bigint, $4::bigint, $5::bigint, $6::bytea, " - "$7::bigint, $8::bool, $9::bigint, $10::bigint)"; - values.resize(10); - values[0] = "\\x" + strHex(args.account); - values[1] = args.forward ? "true" : "false"; - - static std::uint32_t const page_length(200); - if (args.limit == 0 || args.limit > page_length) - values[2] = std::to_string(page_length); - else - values[2] = std::to_string(args.limit); - - if (args.ledger) - { - if (auto range = std::get_if(&args.ledger.value())) - { - values[3] = std::to_string(range->min); - values[4] = std::to_string(range->max); - } - else if (auto hash = std::get_if(&args.ledger.value())) - { - values[5] = ("\\x" + strHex(*hash)); - } - else if ( - auto sequence = std::get_if(&args.ledger.value())) - { - values[6] = std::to_string(*sequence); - } - else if (std::get_if(&args.ledger.value())) - { - // current, closed and validated are all treated as validated - values[7] = "true"; - } - else - { - JLOG(context.j.error()) << "doAccountTxStoredProcedure - " - << "Error parsing ledger args"; - return {}; - } - } - - if (args.marker) - { - values[8] = std::to_string(args.marker->ledgerSeq); - values[9] = std::to_string(args.marker->txnSeq); - } - for (size_t i = 0; i < values.size(); ++i) - { - JLOG(context.j.trace()) << "value " << std::to_string(i) << " = " - << (values[i] ? 
values[i].value() : "null"); - } - - auto res = PgQuery(context.app.getPgPool())(dbParams); - if (!res) - { - JLOG(context.j.error()) - << __func__ << " : Postgres response is null - account = " - << strHex(args.account); - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(context.j.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - account = " << strHex(args.account); - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - - JLOG(context.j.trace()) - << __func__ << " Postgres result msg : " << res.msg(); - if (res.isNull() || res.ntuples() == 0) - { - JLOG(context.j.debug()) - << __func__ << " : No data returned from Postgres : account = " - << strHex(args.account); - - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - - char const* resultStr = res.c_str(); - JLOG(context.j.trace()) << __func__ << " : " - << "postgres result = " << resultStr - << " : account = " << strHex(args.account); - - Json::Value v; - Json::Reader reader; - bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); - if (success) - { - return processAccountTxStoredProcedureResult(args, v, context); - } -#endif - // This shouldn't happen. Postgres should return a parseable error - assert(false); - return {{}, {rpcINTERNAL, "Failed to deserialize Postgres result"}}; -} - std::pair doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) { context.loadType = Resource::feeMediumBurdenRPC; if (context.app.config().reporting()) - return doAccountTxStoredProcedure(args, context); + return dynamic_cast( + &context.app.getRelationalDBInterface()) + ->getAccountTx(args); AccountTxResult result; @@ -543,27 +270,52 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) result.ledgerRange = std::get(lgrRange); result.marker = args.marker; + + RelationalDBInterface::AccountTxPageOptions options = { + args.account, + result.ledgerRange.min, + result.ledgerRange.max, + result.marker, + args.limit, + isUnlimited(context.role)}; + if (args.binary) { - result.transactions = context.netOps.getTxsAccountB( - args.account, - result.ledgerRange.min, - result.ledgerRange.max, - args.forward, - result.marker, - args.limit, - isUnlimited(context.role)); + if (args.forward) + { + auto [tx, marker] = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->oldestAccountTxPageB(options); + result.transactions = tx; + result.marker = marker; + } + else + { + auto [tx, marker] = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->newestAccountTxPageB(options); + result.transactions = tx; + result.marker = marker; + } } else { - result.transactions = context.netOps.getTxsAccount( - args.account, - result.ledgerRange.min, - result.ledgerRange.max, - args.forward, - result.marker, - args.limit, - isUnlimited(context.role)); + if (args.forward) + { + auto [tx, marker] = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->oldestAccountTxPage(options); + result.transactions = tx; + result.marker = marker; + } + else + { + auto [tx, marker] = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->newestAccountTxPage(options); + result.transactions = tx; + result.marker = marker; + } } result.limit = args.limit; diff --git a/src/ripple/rpc/handlers/AccountTxOld.cpp b/src/ripple/rpc/handlers/AccountTxOld.cpp index 5950f474d3..9c5bb0bceb 100644 --- 
a/src/ripple/rpc/handlers/AccountTxOld.cpp +++ b/src/ripple/rpc/handlers/AccountTxOld.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -49,9 +50,9 @@ doAccountTxOld(RPC::JsonContext& context) std::uint32_t offset = context.params.isMember(jss::offset) ? context.params[jss::offset].asUInt() : 0; - int limit = context.params.isMember(jss::limit) + std::uint32_t limit = context.params.isMember(jss::limit) ? context.params[jss::limit].asUInt() - : -1; + : UINT32_MAX; bool bBinary = context.params.isMember(jss::binary) && context.params[jss::binary].asBool(); bool bDescending = context.params.isMember(jss::descending) && @@ -151,16 +152,26 @@ doAccountTxOld(RPC::JsonContext& context) ret[jss::account] = context.app.accountIDCache().toBase58(*raAccount); Json::Value& jvTxns = (ret[jss::transactions] = Json::arrayValue); + RelationalDBInterface::AccountTxOptions options = { + *raAccount, + uLedgerMin, + uLedgerMax, + offset, + limit, + isUnlimited(context.role)}; + if (bBinary) { - auto txns = context.netOps.getAccountTxsB( - *raAccount, - uLedgerMin, - uLedgerMax, - bDescending, - offset, - limit, - isUnlimited(context.role)); + std::vector txns; + + if (bDescending) + txns = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->getNewestAccountTxsB(options); + else + txns = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->getOldestAccountTxsB(options); for (auto it = txns.begin(), end = txns.end(); it != end; ++it) { @@ -178,14 +189,16 @@ doAccountTxOld(RPC::JsonContext& context) } else { - auto txns = context.netOps.getAccountTxs( - *raAccount, - uLedgerMin, - uLedgerMax, - bDescending, - offset, - limit, - isUnlimited(context.role)); + RelationalDBInterface::AccountTxs txns; + + if (bDescending) + txns = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->getNewestAccountTxs(options); + else + txns = dynamic_cast( + &context.app.getRelationalDBInterface()) + ->getOldestAccountTxs(options); for (auto it = txns.begin(), end = txns.end(); it != end; ++it) { diff --git a/src/ripple/rpc/handlers/GetCounts.cpp b/src/ripple/rpc/handlers/GetCounts.cpp index ffa5322123..fd189ebfe6 100644 --- a/src/ripple/rpc/handlers/GetCounts.cpp +++ b/src/ripple/rpc/handlers/GetCounts.cpp @@ -22,8 +22,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -75,17 +75,23 @@ getCountsJson(Application& app, int minObjectCount) if (!app.config().reporting() && app.config().useTxTables()) { - int dbKB = getKBUsedAll(app.getLedgerDB().getSession()); + int dbKB = dynamic_cast( + &app.getRelationalDBInterface()) + ->getKBUsedAll(); if (dbKB > 0) ret[jss::dbKBTotal] = dbKB; - dbKB = getKBUsedDB(app.getLedgerDB().getSession()); + dbKB = dynamic_cast( + &app.getRelationalDBInterface()) + ->getKBUsedLedger(); if (dbKB > 0) ret[jss::dbKBLedger] = dbKB; - dbKB = getKBUsedDB(app.getTxnDB().getSession()); + dbKB = dynamic_cast( + &app.getRelationalDBInterface()) + ->getKBUsedTransaction(); if (dbKB > 0) ret[jss::dbKBTransaction] = dbKB; diff --git a/src/ripple/rpc/handlers/TxHistory.cpp b/src/ripple/rpc/handlers/TxHistory.cpp index 6b972bae25..266327e912 100644 --- a/src/ripple/rpc/handlers/TxHistory.cpp +++ b/src/ripple/rpc/handlers/TxHistory.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -34,113 +35,6 @@ namespace ripple { -Json::Value -doTxHistoryReporting(RPC::JsonContext& context) -{ - Json::Value ret; -#ifdef RIPPLED_REPORTING - if (!context.app.config().reporting()) - { - 
assert(false); - Throw( - "called doTxHistoryReporting but not in reporting mode"); - } - context.loadType = Resource::feeMediumBurdenRPC; - - if (!context.params.isMember(jss::start)) - return rpcError(rpcINVALID_PARAMS); - - unsigned int startIndex = context.params[jss::start].asUInt(); - - if ((startIndex > 10000) && (!isUnlimited(context.role))) - return rpcError(rpcNO_PERMISSION); - - std::string sql = boost::str( - boost::format("SELECT nodestore_hash, ledger_seq " - " FROM transactions" - " ORDER BY ledger_seq DESC LIMIT 20 " - "OFFSET %u;") % - startIndex); - - auto res = PgQuery(context.app.getPgPool())(sql.data()); - - if (!res) - { - JLOG(context.j.error()) - << __func__ << " : Postgres response is null - sql = " << sql; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(context.j.error()) - << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() << " - sql = " << sql; - assert(false); - return {}; - } - - JLOG(context.j.trace()) - << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(context.j.debug()) << __func__ << " : Empty postgres response"; - assert(false); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 2) - { - JLOG(context.j.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . sql = " << sql; - assert(false); - return {}; - } - } - - JLOG(context.j.trace()) - << __func__ << " : Postgres result = " << res.c_str(); - - Json::Value txs; - - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (size_t i = 0; i < res.ntuples(); ++i) - { - uint256 hash; - if (!hash.parseHex(res.c_str(i, 0) + 2)) - assert(false); - nodestoreHashes.push_back(hash); - ledgerSequences.push_back(res.asBigInt(i, 1)); - } - - auto txns = flatFetchTransactions(context.app, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) - { - auto const& [sttx, meta] = txns[i]; - assert(sttx); - - std::string reason; - auto txn = std::make_shared(sttx, reason, context.app); - txn->setLedger(ledgerSequences[i]); - txn->setStatus(COMMITTED); - txs.append(txn->getJson(JsonOptions::none)); - } - - ret[jss::index] = startIndex; - ret[jss::txs] = txs; - ret["used_postgres"] = true; - -#endif - return ret; -} - // { // start: // } @@ -150,8 +44,6 @@ doTxHistory(RPC::JsonContext& context) if (!context.app.config().useTxTables()) return rpcError(rpcNOT_ENABLED); - if (context.app.config().reporting()) - return doTxHistoryReporting(context); context.loadType = Resource::feeMediumBurdenRPC; if (!context.params.isMember(jss::start)) @@ -162,48 +54,17 @@ doTxHistory(RPC::JsonContext& context) if ((startIndex > 10000) && (!isUnlimited(context.role))) return rpcError(rpcNO_PERMISSION); + auto trans = + context.app.getRelationalDBInterface().getTxHistory(startIndex); + Json::Value obj; - Json::Value txs; - + Json::Value& txs = obj[jss::txs]; obj[jss::index] = startIndex; + if (context.app.config().reporting()) + obj["used_postgres"] = true; - std::string sql = boost::str( - boost::format( - "SELECT LedgerSeq, Status, RawTxn " - "FROM Transactions ORDER BY LedgerSeq desc LIMIT %u,20;") % - startIndex); - - { - auto db = context.app.getTxnDB().checkoutDb(); - - // SOCI requires boost::optional (not std::optional) as parameters. 
- boost::optional ledgerSeq; - boost::optional status; - soci::blob sociRawTxnBlob(*db); - soci::indicator rti; - Blob rawTxn; - - soci::statement st = - (db->prepare << sql, - soci::into(ledgerSeq), - soci::into(status), - soci::into(sociRawTxnBlob, rti)); - - st.execute(); - while (st.fetch()) - { - if (soci::i_ok == rti) - convert(sociRawTxnBlob, rawTxn); - else - rawTxn.clear(); - - if (auto trans = Transaction::transactionFromSQL( - ledgerSeq, status, rawTxn, context.app)) - txs.append(trans->getJson(JsonOptions::none)); - } - } - - obj[jss::txs] = txs; + for (auto t : trans) + txs.append(t->getJson(JsonOptions::none)); return obj; } diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index fc17b5ee31..a483b8c7ca 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -490,7 +491,8 @@ isValidated( if (hash) { assert(hash->isNonZero()); - uint256 valHash = getHashByIndex(seq, app); + uint256 valHash = + app.getRelationalDBInterface().getHashByIndex(seq); if (valHash == ledger.info().hash) { // SQL database doesn't match ledger chain diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index c0b0676dd2..83928032c4 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -92,7 +93,7 @@ ShardArchiveHandler::init() { std::lock_guard lock(m_); - if (process_ || downloader_ != nullptr || sqliteDB_ != nullptr) + if (process_ || downloader_ != nullptr || sqlDB_ != nullptr) { JLOG(j_.warn()) << "Archives already being processed"; return false; @@ -115,11 +116,7 @@ ShardArchiveHandler::init() { create_directories(downloadDir_); - sqliteDB_ = std::make_unique( - downloadDir_, - stateDBName, - DownloaderDBPragma, - ShardArchiveHandlerDBInit); + sqlDB_ = makeArchiveDB(downloadDir_, stateDBName); } catch (std::exception const& e) { @@ -144,31 +141,20 @@ ShardArchiveHandler::initFromDB(std::lock_guard const& lock) exists(downloadDir_ / stateDBName) && is_regular_file(downloadDir_ / stateDBName)); - sqliteDB_ = std::make_unique( - downloadDir_, - stateDBName, - DownloaderDBPragma, - ShardArchiveHandlerDBInit); + sqlDB_ = makeArchiveDB(downloadDir_, stateDBName); - auto& session{sqliteDB_->getSession()}; - - soci::rowset rs = - (session.prepare << "SELECT * FROM State;"); - - for (auto it = rs.begin(); it != rs.end(); ++it) - { + readArchiveDB(*sqlDB_, [&](std::string const& url_, int state) { parsedURL url; - if (!parseUrl(url, it->get(1))) + if (!parseUrl(url, url_)) { - JLOG(j_.error()) - << "Failed to parse url: " << it->get(1); + JLOG(j_.error()) << "Failed to parse url: " << url_; - continue; + return; } - add(it->get(0), std::move(url), lock); - } + add(state, std::move(url), lock); + }); // Failed to load anything // from the state database. 
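The ShardArchiveHandler hunks above replace inline SOCI statements with a small set of free helpers: makeArchiveDB to open the download-state database, readArchiveDB to enumerate rows through a callback, and insertArchiveDB / deleteFromArchiveDB / dropArchiveDB for the mutations. A minimal lifecycle sketch follows; the header path, state-file name, shard index, and URL are placeholders for illustration only, while the helper names and the (url, state) callback shape are taken from the diff.

// Sketch only: the first include target is assumed (the diff elides its
// header names); helper names and call shapes follow the hunks above.
#include <ripple/app/rdb/RelationalDBInterface_shards.h>  // assumed path
#include <boost/filesystem.hpp>
#include <iostream>
#include <string>

void
archiveStateRoundTrip(boost::filesystem::path const& downloadDir)
{
    using namespace ripple;

    // Open (or create) the download-state database in the given directory.
    auto db = makeArchiveDB(downloadDir, "state.db");  // file name assumed

    // Record a queued shard archive (index and URL are illustrative).
    insertArchiveDB(*db, 7, "https://example.com/7.tar.lz4");

    // Enumerate stored rows; the callback receives (url, shard index).
    readArchiveDB(*db, [](std::string const& url, int state) {
        std::cout << "shard " << state << " -> " << url << '\n';
    });

    // Remove a single entry, then drop the table once all work is done.
    deleteFromArchiveDB(*db, 7);
    dropArchiveDB(*db);
}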
@@ -223,10 +209,7 @@ ShardArchiveHandler::add( if (!add(shardIndex, std::forward(url.first), lock)) return false; - auto& session{sqliteDB_->getSession()}; - - session << "INSERT INTO State VALUES (:index, :url);", - soci::use(shardIndex), soci::use(url.second); + insertArchiveDB(*sqlDB_, shardIndex, url.second); return true; } @@ -537,10 +520,7 @@ ShardArchiveHandler::remove(std::lock_guard const&) app_.getShardStore()->removePreShard(shardIndex); archives_.erase(shardIndex); - auto& session{sqliteDB_->getSession()}; - - session << "DELETE FROM State WHERE ShardIndex = :index;", - soci::use(shardIndex); + deleteFromArchiveDB(*sqlDB_, shardIndex); auto const dstDir{downloadDir_ / std::to_string(shardIndex)}; try @@ -561,13 +541,9 @@ ShardArchiveHandler::doRelease(std::lock_guard const&) app_.getShardStore()->removePreShard(ar.first); archives_.clear(); - { - auto& session{sqliteDB_->getSession()}; + dropArchiveDB(*sqlDB_); - session << "DROP TABLE State;"; - } - - sqliteDB_.reset(); + sqlDB_.reset(); // Remove temp root download directory try diff --git a/src/test/app/Manifest_test.cpp b/src/test/app/Manifest_test.cpp index 82457802a6..47b16d9488 100644 --- a/src/test/app/Manifest_test.cpp +++ b/src/test/app/Manifest_test.cpp @@ -20,10 +20,10 @@ #include #include #include +#include #include #include #include -#include #include #include #include @@ -254,14 +254,12 @@ public: std::string const dbName("ManifestCacheTestDB"); { + jtx::Env env(*this); DatabaseCon::Setup setup; setup.dataDir = getDatabasePath(); - BEAST_EXPECT(!setup.useGlobalPragma); - DatabaseCon dbCon( - setup, - dbName.data(), - std::array(), - WalletDBInit); + assert(!setup.useGlobalPragma); + + auto dbCon = makeTestWalletDB(setup, dbName); auto getPopulatedManifests = [](ManifestCache const& cache) -> std::vector { @@ -284,7 +282,6 @@ public: std::vector const inManifests( sort(getPopulatedManifests(m))); - jtx::Env env(*this); auto& app = env.app(); auto unl = std::make_unique( m, @@ -297,7 +294,7 @@ public: // save should not store untrusted master keys to db // except for revocations m.save( - dbCon, + *dbCon, "ValidatorManifests", [&unl](PublicKey const& pubKey) { return unl->listed(pubKey); @@ -305,7 +302,7 @@ public: ManifestCache loaded; - loaded.load(dbCon, "ValidatorManifests"); + loaded.load(*dbCon, "ValidatorManifests"); // check that all loaded manifests are revocations std::vector const loadedManifests( @@ -326,13 +323,13 @@ public: unl->load(emptyLocalKey, s1, keys); m.save( - dbCon, + *dbCon, "ValidatorManifests", [&unl](PublicKey const& pubKey) { return unl->listed(pubKey); }); ManifestCache loaded; - loaded.load(dbCon, "ValidatorManifests"); + loaded.load(*dbCon, "ValidatorManifests"); // check that the manifest caches are the same std::vector const loadedManifests( @@ -360,7 +357,10 @@ public: std::string const badManifest = "bad manifest"; BEAST_EXPECT(!loaded.load( - dbCon, "ValidatorManifests", badManifest, emptyRevocation)); + *dbCon, + "ValidatorManifests", + badManifest, + emptyRevocation)); auto const sk = randomSecretKey(); auto const pk = derivePublicKey(KeyType::ed25519, sk); @@ -370,7 +370,10 @@ public: makeManifestString(pk, sk, kp.first, kp.second, 0); BEAST_EXPECT(loaded.load( - dbCon, "ValidatorManifests", cfgManifest, emptyRevocation)); + *dbCon, + "ValidatorManifests", + cfgManifest, + emptyRevocation)); } { // load config revocation @@ -380,7 +383,10 @@ public: std::vector const badRevocation = { "bad revocation"}; BEAST_EXPECT(!loaded.load( - dbCon, "ValidatorManifests", emptyManifest, 
badRevocation)); + *dbCon, + "ValidatorManifests", + emptyManifest, + badRevocation)); auto const sk = randomSecretKey(); auto const keyType = KeyType::ed25519; @@ -390,13 +396,16 @@ public: makeManifestString(pk, sk, kp.first, kp.second, 0)}; BEAST_EXPECT(!loaded.load( - dbCon, "ValidatorManifests", emptyManifest, nonRevocation)); + *dbCon, + "ValidatorManifests", + emptyManifest, + nonRevocation)); BEAST_EXPECT(!loaded.revoked(pk)); std::vector const badSigRevocation = { makeRevocationString(sk, keyType, true)}; BEAST_EXPECT(!loaded.load( - dbCon, + *dbCon, "ValidatorManifests", emptyManifest, badSigRevocation)); @@ -405,7 +414,10 @@ public: std::vector const cfgRevocation = { makeRevocationString(sk, keyType)}; BEAST_EXPECT(loaded.load( - dbCon, "ValidatorManifests", emptyManifest, cfgRevocation)); + *dbCon, + "ValidatorManifests", + emptyManifest, + cfgRevocation)); BEAST_EXPECT(loaded.revoked(pk)); } diff --git a/src/test/app/SHAMapStore_test.cpp b/src/test/app/SHAMapStore_test.cpp index 65462fe751..fd21983a48 100644 --- a/src/test/app/SHAMapStore_test.cpp +++ b/src/test/app/SHAMapStore_test.cpp @@ -19,9 +19,8 @@ #include #include +#include #include -#include -#include #include #include #include @@ -64,31 +63,26 @@ class SHAMapStore_test : public beast::unit_test::suite return good; auto const seq = json[jss::result][jss::ledger_index].asUInt(); - std::string outHash; - LedgerIndex outSeq; - std::string outParentHash; - std::string outDrops; - std::uint64_t outCloseTime; - std::uint64_t outParentCloseTime; - std::uint64_t outCloseTimeResolution; - std::uint64_t outCloseFlags; - std::string outAccountHash; - std::string outTxHash; - { - auto db = env.app().getLedgerDB().checkoutDb(); + std::optional oinfo = + env.app().getRelationalDBInterface().getLedgerInfoByIndex(seq); + if (!oinfo) + return false; + const LedgerInfo& info = oinfo.value(); - *db << "SELECT LedgerHash,LedgerSeq,PrevHash,TotalCoins, " - "ClosingTime,PrevClosingTime,CloseTimeRes,CloseFlags, " - "AccountSetHash,TransSetHash " - "FROM Ledgers " - "WHERE LedgerSeq = :seq", - soci::use(seq), soci::into(outHash), soci::into(outSeq), - soci::into(outParentHash), soci::into(outDrops), - soci::into(outCloseTime), soci::into(outParentCloseTime), - soci::into(outCloseTimeResolution), soci::into(outCloseFlags), - soci::into(outAccountHash), soci::into(outTxHash); - } + const std::string outHash = to_string(info.hash); + const LedgerIndex outSeq = info.seq; + const std::string outParentHash = to_string(info.parentHash); + const std::string outDrops = to_string(info.drops); + const std::uint64_t outCloseTime = + info.closeTime.time_since_epoch().count(); + const std::uint64_t outParentCloseTime = + info.parentCloseTime.time_since_epoch().count(); + const std::uint64_t outCloseTimeResolution = + info.closeTimeResolution.count(); + const std::uint64_t outCloseFlags = info.closeFlags; + const std::string outAccountHash = to_string(info.accountHash); + const std::string outTxHash = to_string(info.txHash); auto const& ledger = json[jss::result][jss::ledger]; return outHash == ledger[jss::hash].asString() && outSeq == seq && @@ -125,15 +119,10 @@ class SHAMapStore_test : public beast::unit_test::suite void ledgerCheck(jtx::Env& env, int const rows, int const first) { - auto db = env.app().getLedgerDB().checkoutDb(); - - int actualRows, actualFirst, actualLast; - *db << "SELECT count(*) AS rows, " - "min(LedgerSeq) as first, " - "max(LedgerSeq) as last " - "FROM Ledgers;", - soci::into(actualRows), soci::into(actualFirst), - 
soci::into(actualLast); + const auto [actualRows, actualFirst, actualLast] = + dynamic_cast( + &env.app().getRelationalDBInterface()) + ->getLedgerCountMinMax(); BEAST_EXPECT(actualRows == rows); BEAST_EXPECT(actualFirst == first); @@ -143,27 +132,19 @@ class SHAMapStore_test : public beast::unit_test::suite void transactionCheck(jtx::Env& env, int const rows) { - auto db = env.app().getTxnDB().checkoutDb(); - - int actualRows; - *db << "SELECT count(*) AS rows " - "FROM Transactions;", - soci::into(actualRows); - - BEAST_EXPECT(actualRows == rows); + BEAST_EXPECT( + dynamic_cast( + &env.app().getRelationalDBInterface()) + ->getTransactionCount() == rows); } void accountTransactionCheck(jtx::Env& env, int const rows) { - auto db = env.app().getTxnDB().checkoutDb(); - - int actualRows; - *db << "SELECT count(*) AS rows " - "FROM AccountTransactions;", - soci::into(actualRows); - - BEAST_EXPECT(actualRows == rows); + BEAST_EXPECT( + dynamic_cast( + &env.app().getRelationalDBInterface()) + ->getAccountTransactionCount() == rows); } int diff --git a/src/test/core/SociDB_test.cpp b/src/test/core/SociDB_test.cpp index b9e348b323..875af9aa05 100644 --- a/src/test/core/SociDB_test.cpp +++ b/src/test/core/SociDB_test.cpp @@ -108,7 +108,7 @@ public: for (auto const& i : d) { - SociConfig sc(c, i.first); + DBConfig sc(c, i.first); BEAST_EXPECT( boost::ends_with(sc.connectionString(), i.first + i.second)); } @@ -119,7 +119,7 @@ public: testcase("open"); BasicConfig c; setupSQLiteConfig(c, getDatabasePath()); - SociConfig sc(c, "SociTestDB"); + DBConfig sc(c, "SociTestDB"); std::vector const stringData( {"String1", "String2", "String3"}); std::vector const intData({1, 2, 3}); @@ -180,7 +180,7 @@ public: testcase("select"); BasicConfig c; setupSQLiteConfig(c, getDatabasePath()); - SociConfig sc(c, "SociTestDB"); + DBConfig sc(c, "SociTestDB"); std::vector const ubid( {(std::uint64_t)std::numeric_limits::max(), 20, 30}); std::vector const bid({-10, -20, -30}); @@ -312,7 +312,7 @@ public: testcase("deleteWithSubselect"); BasicConfig c; setupSQLiteConfig(c, getDatabasePath()); - SociConfig sc(c, "SociTestDB"); + DBConfig sc(c, "SociTestDB"); { soci::session s; sc.open(s); diff --git a/src/test/rpc/Fee_test.cpp b/src/test/rpc/Fee_test.cpp index 635a879f55..17ada929e2 100644 --- a/src/test/rpc/Fee_test.cpp +++ b/src/test/rpc/Fee_test.cpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/src/test/rpc/ShardArchiveHandler_test.cpp b/src/test/rpc/ShardArchiveHandler_test.cpp index d4452fc295..89c2d822f3 100644 --- a/src/test/rpc/ShardArchiveHandler_test.cpp +++ b/src/test/rpc/ShardArchiveHandler_test.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -81,19 +82,14 @@ public: { std::lock_guard lock(handler->m_); - - auto& session{handler->sqliteDB_->getSession()}; - - soci::rowset rs = - (session.prepare << "SELECT * FROM State;"); - std::uint64_t rowCount = 0; - for (auto it = rs.begin(); it != rs.end(); ++it, ++rowCount) - { - BEAST_EXPECT(it->get(0) == 1); - BEAST_EXPECT(it->get(1) == rawUrl); - } + readArchiveDB( + *handler->sqlDB_, [&](std::string const& url, int state) { + BEAST_EXPECT(state == 1); + BEAST_EXPECT(url == rawUrl); + ++rowCount; + }); BEAST_EXPECT(rowCount == 1); } @@ -136,17 +132,14 @@ public: { std::lock_guard lock(handler->m_); - - auto& session{handler->sqliteDB_->getSession()}; - soci::rowset rs = - (session.prepare << "SELECT * FROM State;"); - 
std::uint64_t pos = 0; - for (auto it = rs.begin(); it != rs.end(); ++it, ++pos) - { - BEAST_EXPECT(it->get(0) == dl[pos].first); - BEAST_EXPECT(it->get(1) == dl[pos].second); - } + + readArchiveDB( + *handler->sqlDB_, [&](std::string const& url, int state) { + BEAST_EXPECT(state == dl[pos].first); + BEAST_EXPECT(url == dl[pos].second); + ++pos; + }); BEAST_EXPECT(pos == dl.size()); } diff --git a/src/test/rpc/Transaction_test.cpp b/src/test/rpc/Transaction_test.cpp index 33cadf7460..a20a20aa61 100644 --- a/src/test/rpc/Transaction_test.cpp +++ b/src/test/rpc/Transaction_test.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include #include #include @@ -119,9 +119,9 @@ class Transaction_test : public beast::unit_test::suite const auto deletedLedger = (startLegSeq + endLegSeq) / 2; { // Remove one of the ledgers from the database directly - auto db = env.app().getTxnDB().checkoutDb(); - *db << "DELETE FROM Transactions WHERE LedgerSeq == " - << deletedLedger << ";"; + dynamic_cast( + &env.app().getRelationalDBInterface()) + ->deleteTransactionByLedgerSeq(deletedLedger); } for (int deltaEndSeq = 0; deltaEndSeq < 2; ++deltaEndSeq) diff --git a/src/test/rpc/Tx_test.cpp b/src/test/rpc/Tx_test.cpp index 1658a0ebd5..e4e0507b99 100644 --- a/src/test/rpc/Tx_test.cpp +++ b/src/test/rpc/Tx_test.cpp @@ -19,8 +19,8 @@ #include #include +#include #include -#include #include #include #include @@ -769,9 +769,9 @@ class Tx_test : public beast::unit_test::suite const auto deletedLedger = (startLegSeq + endLegSeq) / 2; { // Remove one of the ledgers from the database directly - auto db = env.app().getTxnDB().checkoutDb(); - *db << "DELETE FROM Transactions WHERE LedgerSeq == " - << deletedLedger << ";"; + dynamic_cast( + &env.app().getRelationalDBInterface()) + ->deleteTransactionByLedgerSeq(deletedLedger); } for (bool b : {false, true})
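Taken together, the handler and test changes converge on one access pattern: call Application::getRelationalDBInterface() for backend-agnostic queries, and dynamic_cast the result to RelationalDBInterfaceSqlite (or RelationalDBInterfacePostgres in reporting mode) when a backend-specific query is needed. A minimal sketch, assuming the elided header paths and a caller that already holds an Application&:

// Sketch only: the include targets are assumed (they are elided in the diff);
// the interface and method names come from the hunks above.
#include <ripple/app/main/Application.h>                         // assumed
#include <ripple/app/rdb/RelationalDBInterface.h>                // assumed
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>  // assumed
#include <iostream>

namespace ripple {

void
dumpRelationalDBStats(
    Application& app,
    RelationalDBInterface::AccountTxPageOptions const& options)
{
    // Backend-agnostic call: available on both the SQLite and Postgres
    // implementations, as used by doTxHistory above.
    auto const history = app.getRelationalDBInterface().getTxHistory(0);
    std::cout << "tx history entries: " << history.size() << '\n';

    // Backend-specific calls: only meaningful on the SQLite backend
    // (reporting mode swaps in RelationalDBInterfacePostgres instead).
    if (auto* sqlite = dynamic_cast<RelationalDBInterfaceSqlite*>(
            &app.getRelationalDBInterface()))
    {
        std::cout << "transactions: " << sqlite->getTransactionCount() << '\n'
                  << "account txns: " << sqlite->getAccountTransactionCount()
                  << '\n';

        // Paged account history, oldest first, mirroring the forward /
        // non-binary branch of doAccountTxHelp above.
        auto [txs, marker] = sqlite->oldestAccountTxPage(options);
        std::cout << "page size: " << txs.size()
                  << (marker ? " (more available)" : "") << '\n';
    }
}

}  // namespace ripple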