From 21863b05f3dbb601a8df2eba376e8fd9496f5890 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Sat, 23 Nov 2024 21:19:09 +1000 Subject: [PATCH 01/33] Limit xahau genesis to networks starting with 2133X (#395) --- src/ripple/app/tx/impl/Change.cpp | 7 ++ src/test/app/XahauGenesis_test.cpp | 147 ++++++++++++++++++++++++----- 2 files changed, 128 insertions(+), 26 deletions(-) diff --git a/src/ripple/app/tx/impl/Change.cpp b/src/ripple/app/tx/impl/Change.cpp index 1c087ff66..c91b79403 100644 --- a/src/ripple/app/tx/impl/Change.cpp +++ b/src/ripple/app/tx/impl/Change.cpp @@ -458,6 +458,13 @@ Change::activateXahauGenesis() bool const isTest = (ctx_.tx.getFlags() & tfTestSuite) && ctx_.app.config().standalone(); + // RH NOTE: we'll only configure xahau governance structure on networks that + // begin with 2133... so production xahau: 21337 and its testnet 21338 + // with 21330-21336 and 21339 also valid and reserved for dev nets etc. + // all other Network IDs will be conventionally configured. + if ((ctx_.app.config().NETWORK_ID / 10) != 2133 && !isTest) + return; + auto [ng_entries, l1_entries, l2_entries, gov_params] = normalizeXahauGenesis( isTest ? 
TestNonGovernanceDistribution : NonGovernanceDistribution, diff --git a/src/test/app/XahauGenesis_test.cpp b/src/test/app/XahauGenesis_test.cpp index aa546a116..d143591e0 100644 --- a/src/test/app/XahauGenesis_test.cpp +++ b/src/test/app/XahauGenesis_test.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,7 @@ #include #include #include +#include #include #define BEAST_REQUIRE(x) \ @@ -59,7 +61,18 @@ maybe_to_string(T val, std::enable_if_t, int> = 0) using namespace XahauGenesis; namespace ripple { + +inline std::unique_ptr +makeNetworkConfig(uint32_t networkID) +{ + using namespace test::jtx; + return envconfig([&](std::unique_ptr cfg) { + cfg->NETWORK_ID = networkID; + return cfg; + }); +} namespace test { + /* Accounts used in this test suite: alice: AE123A8556F3CF91154711376AFB0F894F832B3D, @@ -125,7 +138,8 @@ struct XahauGenesis_test : public beast::unit_test::suite bool burnedViaTest = false, // means the calling test already burned some of the genesis bool skipTests = false, - bool const testFlag = false) + bool const testFlag = false, + bool const badNetID = false) { using namespace jtx; @@ -183,6 +197,20 @@ struct XahauGenesis_test : public beast::unit_test::suite if (skipTests) return; + if (badNetID) + { + BEAST_EXPECT( + 100000000000000000ULL == + env.app().getLedgerMaster().getClosedLedger()->info().drops); + + auto genesisAccRoot = env.le(keylet::account(genesisAccID)); + BEAST_REQUIRE(!!genesisAccRoot); + BEAST_EXPECT( + genesisAccRoot->getFieldAmount(sfBalance) == + XRPAmount(100000000000000000ULL)); + return; + } + // sum the initial distribution balances, these should equal total coins // in the closed ledger std::vector> const& l1membership = @@ -442,17 +470,59 @@ struct XahauGenesis_test : public beast::unit_test::suite { testcase("Test activation"); using namespace jtx; - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - 
featureXahauGenesis}; activate(__LINE__, env, false, false, false); } + void + testBadNetworkIDActivation(FeatureBitset features) + { + testcase("Test Bad Network ID activation"); + using namespace jtx; + std::vector badNetIDs{ + 0, + 1, + 2, + 10, + 100, + 1000, + 10000, + 20000, + 21000, + 21328, + 21329, + 21340, + 21341, + 65535}; + + for (int netid : badNetIDs) + { + Env env{ + *this, + makeNetworkConfig(netid), + features - featureXahauGenesis}; + activate(__LINE__, env, false, false, false, true); + } + + for (int netid = 21330; netid <= 21339; ++netid) + { + Env env{ + *this, + makeNetworkConfig(netid), + features - featureXahauGenesis}; + activate(__LINE__, env, false, false, false, false); + } + } + void testWithSignerList(FeatureBitset features) { using namespace jtx; testcase("Test signerlist"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; Account const alice{"alice", KeyType::ed25519}; env.fund(XRP(1000), alice); @@ -468,7 +538,8 @@ struct XahauGenesis_test : public beast::unit_test::suite { using namespace jtx; testcase("Test regkey"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; env.memoize(env.master); Account const alice("alice"); @@ -667,7 +738,11 @@ struct XahauGenesis_test : public beast::unit_test::suite { using namespace jtx; testcase("Test governance membership voting L1"); - Env env{*this, envconfig(), features - featureXahauGenesis, nullptr}; + Env env{ + *this, + makeNetworkConfig(21337), + features - featureXahauGenesis, + nullptr}; auto const alice = Account("alice"); auto const bob = Account("bob"); @@ -2111,7 +2186,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace jtx; testcase("Test governance membership voting L2"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, 
makeNetworkConfig(21337), features - featureXahauGenesis}; auto const alice = Account("alice"); auto const bob = Account("bob"); @@ -3708,7 +3784,7 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test last close time"); - Env env{*this, envconfig(), features}; + Env env{*this, makeNetworkConfig(21337), features}; validateTime(lastClose(env), 0); // last close = 0 @@ -3738,7 +3814,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace jtx; testcase("test claim reward rate is == 0"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; STAmount const feesXRP = XRP(1); @@ -3783,7 +3860,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace jtx; testcase("test claim reward rate is > 1"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; STAmount const feesXRP = XRP(1); @@ -3828,7 +3906,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace jtx; testcase("test claim reward delay is == 0"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; STAmount const feesXRP = XRP(1); @@ -3873,7 +3952,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace jtx; testcase("test claim reward delay is < 0"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; STAmount const feesXRP = XRP(1); @@ -3918,7 +3998,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace jtx; testcase("test claim reward before time"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; 
STAmount const feesXRP = XRP(1); @@ -3968,7 +4049,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test claim reward valid without unl report"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; bool const has240819 = env.current()->rules().enabled(fix240819); double const rateDrops = 0.00333333333 * 1'000'000; @@ -4115,7 +4197,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test claim reward valid with unl report"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -4250,7 +4333,7 @@ struct XahauGenesis_test : public beast::unit_test::suite { FeatureBitset _features = features - featureXahauGenesis; auto const amend = withXahauV1 ? 
_features : _features - fixXahauV1; - Env env{*this, envconfig(), amend}; + Env env{*this, makeNetworkConfig(21337), amend}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -4387,7 +4470,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test claim reward optin optout"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; bool const has240819 = env.current()->rules().enabled(fix240819); double const rateDrops = 0.00333333333 * 1'000'000; @@ -4499,7 +4583,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test claim reward bal == 1"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -4587,7 +4672,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test claim reward elapsed_since_last == 1"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -4668,7 +4754,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test claim reward elapsed_since_last == 0"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; STAmount const feesXRP = XRP(1); @@ -4929,7 +5016,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test compound interest over 12 claims"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + 
Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -5027,7 +5115,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test deposit"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -5117,7 +5206,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test deposit withdraw"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -5209,7 +5299,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test deposit late"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -5299,7 +5390,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test deposit late withdraw"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -5392,7 +5484,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test no claim"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 
* 1'000'000; STAmount const feesXRP = XRP(1); @@ -5480,7 +5573,8 @@ struct XahauGenesis_test : public beast::unit_test::suite using namespace std::chrono_literals; testcase("test no claim late"); - Env env{*this, envconfig(), features - featureXahauGenesis}; + Env env{ + *this, makeNetworkConfig(21337), features - featureXahauGenesis}; double const rateDrops = 0.00333333333 * 1'000'000; STAmount const feesXRP = XRP(1); @@ -5594,6 +5688,7 @@ struct XahauGenesis_test : public beast::unit_test::suite testGovernHookWithFeats(FeatureBitset features) { testPlainActivation(features); + testBadNetworkIDActivation(features); testWithSignerList(features); testWithRegularKey(features); testGovernanceL1(features); From e086724772b68a96732d65cf4996277e164aa267 Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Wed, 13 Nov 2024 11:35:59 +1100 Subject: [PATCH 02/33] UDP RPC (admin) support (#390) --- src/ripple/app/misc/NetworkOPs.cpp | 3 +- src/ripple/rpc/handlers/Subscribe.cpp | 10 +- src/ripple/rpc/handlers/Unsubscribe.cpp | 7 + src/ripple/rpc/impl/ServerHandlerImp.cpp | 200 ++++++++++++++++ src/ripple/rpc/impl/ServerHandlerImp.h | 15 ++ src/ripple/rpc/impl/UDPInfoSub.h | 140 +++++++++++ src/ripple/server/Port.h | 9 + src/ripple/server/impl/Port.cpp | 7 + src/ripple/server/impl/ServerImpl.h | 28 ++- src/ripple/server/impl/UDPDoor.h | 284 +++++++++++++++++++++++ src/test/server/Server_test.cpp | 16 ++ 11 files changed, 712 insertions(+), 7 deletions(-) create mode 100644 src/ripple/rpc/impl/UDPInfoSub.h create mode 100644 src/ripple/server/impl/UDPDoor.h diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 6f6bfcc1c..637f7725e 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -67,9 +67,10 @@ #include #include #include +#include #include #include - +#include #include #include #include diff --git a/src/ripple/rpc/handlers/Subscribe.cpp b/src/ripple/rpc/handlers/Subscribe.cpp index 
f17aa62b6..90cf682e1 100644 --- a/src/ripple/rpc/handlers/Subscribe.cpp +++ b/src/ripple/rpc/handlers/Subscribe.cpp @@ -30,6 +30,7 @@ #include #include #include +#include namespace ripple { @@ -42,7 +43,7 @@ doSubscribe(RPC::JsonContext& context) if (!context.infoSub && !context.params.isMember(jss::url)) { // Must be a JSON-RPC call. - JLOG(context.j.info()) << "doSubscribe: RPC subscribe requires a url"; + JLOG(context.j.warn()) << "doSubscribe: RPC subscribe requires a url"; return rpcError(rpcINVALID_PARAMS); } @@ -373,6 +374,13 @@ doSubscribe(RPC::JsonContext& context) } } + if (ispSub) + { + if (std::shared_ptr udp = + std::dynamic_pointer_cast(ispSub)) + udp->increment(); + } + return jvResult; } diff --git a/src/ripple/rpc/handlers/Unsubscribe.cpp b/src/ripple/rpc/handlers/Unsubscribe.cpp index 8a606a26d..4df234cd2 100644 --- a/src/ripple/rpc/handlers/Unsubscribe.cpp +++ b/src/ripple/rpc/handlers/Unsubscribe.cpp @@ -25,6 +25,7 @@ #include #include #include +#include namespace ripple { @@ -245,6 +246,12 @@ doUnsubscribe(RPC::JsonContext& context) context.netOps.tryRemoveRpcSub(context.params[jss::url].asString()); } + if (ispSub) + { + if (auto udp = std::dynamic_pointer_cast(ispSub)) + udp->destroy(); + } + return jvResult; } diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandlerImp.cpp index 81075a5c0..c4e41fa27 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandlerImp.cpp @@ -361,6 +361,67 @@ ServerHandlerImp::onWSMessage( } } +void +ServerHandlerImp::onUDPMessage( + std::string const& message, + boost::asio::ip::tcp::endpoint const& remoteEndpoint, + std::function sendResponse) +{ + Json::Value jv; + if (message.size() > RPC::Tuning::maxRequestSize || + !Json::Reader{}.parse(message, jv) || !jv.isObject()) + { + Json::Value jvResult(Json::objectValue); + jvResult[jss::type] = jss::error; + jvResult[jss::error] = "jsonInvalid"; + jvResult[jss::value] = message; + + std::string const 
response = to_string(jvResult); + JLOG(m_journal.trace()) + << "UDP sending error response: '" << jvResult << "'"; + sendResponse(response); + return; + } + + JLOG(m_journal.trace()) + << "UDP received '" << jv << "' from " << remoteEndpoint; + + auto const postResult = m_jobQueue.postCoro( + jtCLIENT_RPC, // Using RPC job type since this is admin RPC + "UDP-RPC", + [this, + remoteEndpoint, + jv = std::move(jv), + sendResponse = std::move(sendResponse)]( + std::shared_ptr const& coro) { + // Process the request similar to WebSocket but with UDP context + Role const role = Role::ADMIN; // UDP-RPC is admin-only + auto const jr = + this->processUDP(jv, role, coro, sendResponse, remoteEndpoint); + + std::string const response = to_string(jr); + JLOG(m_journal.trace()) + << "UDP sending '" << jr << "' to " << remoteEndpoint; + + // Send response back via UDP + sendResponse(response); + }); + + if (postResult == nullptr) + { + // Request rejected, probably shutting down + Json::Value jvResult(Json::objectValue); + jvResult[jss::type] = jss::error; + jvResult[jss::error] = "serverShuttingDown"; + jvResult[jss::value] = "Server is shutting down"; + + std::string const response = to_string(jvResult); + JLOG(m_journal.trace()) + << "UDP sending shutdown response to " << remoteEndpoint; + sendResponse(response); + } +} + void ServerHandlerImp::onClose(Session& session, boost::system::error_code const&) { @@ -397,6 +458,145 @@ logDuration( << " microseconds. request = " << request; } +Json::Value +ServerHandlerImp::processUDP( + Json::Value const& jv, + Role const& role, + std::shared_ptr const& coro, + std::optional> + sendResponse /* used for subscriptions */, + boost::asio::ip::tcp::endpoint const& remoteEndpoint) +{ + std::shared_ptr is; + // Requests without "command" are invalid. 
+ Json::Value jr(Json::objectValue); + try + { + auto apiVersion = + RPC::getAPIVersionNumber(jv, app_.config().BETA_RPC_API); + if (apiVersion == RPC::apiInvalidVersion || + (!jv.isMember(jss::command) && !jv.isMember(jss::method)) || + (jv.isMember(jss::command) && !jv[jss::command].isString()) || + (jv.isMember(jss::method) && !jv[jss::method].isString()) || + (jv.isMember(jss::command) && jv.isMember(jss::method) && + jv[jss::command].asString() != jv[jss::method].asString())) + { + jr[jss::type] = jss::response; + jr[jss::status] = jss::error; + jr[jss::error] = apiVersion == RPC::apiInvalidVersion + ? jss::invalid_API_version + : jss::missingCommand; + jr[jss::request] = jv; + if (jv.isMember(jss::id)) + jr[jss::id] = jv[jss::id]; + if (jv.isMember(jss::jsonrpc)) + jr[jss::jsonrpc] = jv[jss::jsonrpc]; + if (jv.isMember(jss::ripplerpc)) + jr[jss::ripplerpc] = jv[jss::ripplerpc]; + if (jv.isMember(jss::api_version)) + jr[jss::api_version] = jv[jss::api_version]; + + return jr; + } + + auto required = RPC::roleRequired( + apiVersion, + app_.config().BETA_RPC_API, + jv.isMember(jss::command) ? 
jv[jss::command].asString() + : jv[jss::method].asString()); + if (Role::FORBID == role) + { + jr[jss::result] = rpcError(rpcFORBIDDEN); + } + else + { + Resource::Consumer c; + Resource::Charge loadType = Resource::feeReferenceRPC; + + if (sendResponse.has_value()) + is = UDPInfoSub::getInfoSub( + m_networkOPs, *sendResponse, remoteEndpoint); + + RPC::JsonContext context{ + {app_.journal("RPCHandler"), + app_, + loadType, + app_.getOPs(), + app_.getLedgerMaster(), + c, + role, + coro, + is, + apiVersion}, + jv}; + + auto start = std::chrono::system_clock::now(); + RPC::doCommand(context, jr[jss::result]); + auto end = std::chrono::system_clock::now(); + logDuration(jv, end - start, m_journal); + } + } + catch (std::exception const& ex) + { + jr[jss::result] = RPC::make_error(rpcINTERNAL); + JLOG(m_journal.error()) + << "Exception while processing WS: " << ex.what() << "\n" + << "Input JSON: " << Json::Compact{Json::Value{jv}}; + } + + if (is) + { + if (auto udp = std::dynamic_pointer_cast(is)) + udp->destroy(); + } + + // Currently we will simply unwrap errors returned by the RPC + // API, in the future maybe we can make the responses + // consistent. + // + // Regularize result. This is duplicate code. 
+ if (jr[jss::result].isMember(jss::error)) + { + jr = jr[jss::result]; + jr[jss::status] = jss::error; + + auto rq = jv; + + if (rq.isObject()) + { + if (rq.isMember(jss::passphrase.c_str())) + rq[jss::passphrase.c_str()] = ""; + if (rq.isMember(jss::secret.c_str())) + rq[jss::secret.c_str()] = ""; + if (rq.isMember(jss::seed.c_str())) + rq[jss::seed.c_str()] = ""; + if (rq.isMember(jss::seed_hex.c_str())) + rq[jss::seed_hex.c_str()] = ""; + } + + jr[jss::request] = rq; + } + else + { + if (jr[jss::result].isMember("forwarded") && + jr[jss::result]["forwarded"]) + jr = jr[jss::result]; + jr[jss::status] = jss::success; + } + + if (jv.isMember(jss::id)) + jr[jss::id] = jv[jss::id]; + if (jv.isMember(jss::jsonrpc)) + jr[jss::jsonrpc] = jv[jss::jsonrpc]; + if (jv.isMember(jss::ripplerpc)) + jr[jss::ripplerpc] = jv[jss::ripplerpc]; + if (jv.isMember(jss::api_version)) + jr[jss::api_version] = jv[jss::api_version]; + + jr[jss::type] = jss::response; + return jr; +} + Json::Value ServerHandlerImp::processSession( std::shared_ptr const& session, diff --git a/src/ripple/rpc/impl/ServerHandlerImp.h b/src/ripple/rpc/impl/ServerHandlerImp.h index 7c0bf9c9a..36ee6f5e2 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.h +++ b/src/ripple/rpc/impl/ServerHandlerImp.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -164,6 +165,12 @@ public: std::shared_ptr session, std::vector const& buffers); + void + onUDPMessage( + std::string const& message, + boost::asio::ip::tcp::endpoint const& remoteEndpoint, + std::function sendResponse); + void onClose(Session& session, boost::system::error_code const&); @@ -177,6 +184,14 @@ private: std::shared_ptr const& coro, Json::Value const& jv); + Json::Value + processUDP( + Json::Value const& jv, + Role const& role, + std::shared_ptr const& coro, + std::optional> sendResponse, + boost::asio::ip::tcp::endpoint const& remoteEndpoint); + void processSession( std::shared_ptr const&, diff --git 
a/src/ripple/rpc/impl/UDPInfoSub.h b/src/ripple/rpc/impl/UDPInfoSub.h new file mode 100644 index 000000000..4766b7b07 --- /dev/null +++ b/src/ripple/rpc/impl/UDPInfoSub.h @@ -0,0 +1,140 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_RPC_UDPINFOSUB_H +#define RIPPLE_RPC_UDPINFOSUB_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +class UDPInfoSub : public InfoSub +{ + std::function send_; + boost::asio::ip::tcp::endpoint endpoint_; + + UDPInfoSub( + Source& source, + std::function& sendResponse, + boost::asio::ip::tcp::endpoint const& remoteEndpoint) + : InfoSub(source), send_(sendResponse), endpoint_(remoteEndpoint) + { + } + + struct RefCountedSub + { + std::shared_ptr sub; + size_t refCount; + + RefCountedSub(std::shared_ptr s) + : sub(std::move(s)), refCount(1) + { + } + }; + + static inline std::mutex mtx_; + static inline std::map map_; + +public: + static std::shared_ptr + getInfoSub( + Source& source, + std::function& sendResponse, + boost::asio::ip::tcp::endpoint const& remoteEndpoint) + { + std::lock_guard lock(mtx_); + + auto it = map_.find(remoteEndpoint); + if (it != map_.end()) + { + it->second.refCount++; + return it->second.sub; + } + + auto sub = std::shared_ptr( + new UDPInfoSub(source, sendResponse, remoteEndpoint)); + map_.emplace(remoteEndpoint, RefCountedSub(sub)); + return sub; + } + + static bool + increment(boost::asio::ip::tcp::endpoint const& remoteEndpoint) + { + std::lock_guard lock(mtx_); + + auto it = map_.find(remoteEndpoint); + if (it != map_.end()) + { + it->second.refCount++; + return true; + } + return false; + } + + bool + increment() + { + return increment(endpoint_); + } + + static bool + destroy(boost::asio::ip::tcp::endpoint const& remoteEndpoint) + { + std::lock_guard lock(mtx_); + + auto it = map_.find(remoteEndpoint); + if (it != map_.end()) + { + if (--it->second.refCount == 0) + { + map_.erase(it); + return true; + } + } + return false; + } + + bool + destroy() + { + return destroy(endpoint_); + } + + void + send(Json::Value const& jv, bool) override + { + std::string const str = 
to_string(jv); + send_(str); + } + + boost::asio::ip::tcp::endpoint const& + endpoint() const + { + return endpoint_; + } +}; +} // namespace ripple +#endif diff --git a/src/ripple/server/Port.h b/src/ripple/server/Port.h index 9dccfdf9c..438d521ea 100644 --- a/src/ripple/server/Port.h +++ b/src/ripple/server/Port.h @@ -86,6 +86,15 @@ struct Port // Returns a string containing the list of protocols std::string protocols() const; + + bool + has_udp() const + { + return protocol.count("udp") > 0; + } + + // Maximum UDP packet size (default 64KB) + std::size_t udp_packet_size = 65536; }; std::ostream& diff --git a/src/ripple/server/impl/Port.cpp b/src/ripple/server/impl/Port.cpp index 1b869f6a5..a3e88d5cd 100644 --- a/src/ripple/server/impl/Port.cpp +++ b/src/ripple/server/impl/Port.cpp @@ -244,6 +244,13 @@ parse_Port(ParsedPort& port, Section const& section, std::ostream& log) optResult->begin(), optResult->end())) port.protocol.insert(s); } + + if (port.protocol.count("udp") > 0 && port.protocol.size() > 1) + { + log << "Port " << section.name() + << " cannot mix UDP with other protocols"; + Throw(); + } } { diff --git a/src/ripple/server/impl/ServerImpl.h b/src/ripple/server/impl/ServerImpl.h index a3abf7891..8c0622396 100644 --- a/src/ripple/server/impl/ServerImpl.h +++ b/src/ripple/server/impl/ServerImpl.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -162,18 +163,35 @@ ServerImpl::ports(std::vector const& ports) { if (closed()) Throw("ports() on closed Server"); + ports_.reserve(ports.size()); Endpoints eps; eps.reserve(ports.size()); + for (auto const& port : ports) { ports_.push_back(port); - if (auto sp = ios_.emplace>( - handler_, io_service_, ports_.back(), j_)) + + if (port.has_udp()) { - list_.push_back(sp); - eps.push_back(sp->get_endpoint()); - sp->run(); + // UDP-RPC door + if (auto sp = ios_.emplace>( + handler_, io_service_, ports_.back(), j_)) + { + eps.push_back(sp->get_endpoint()); + sp->run(); + } + } + else 
+ { + // Standard TCP door + if (auto sp = ios_.emplace>( + handler_, io_service_, ports_.back(), j_)) + { + list_.push_back(sp); + eps.push_back(sp->get_endpoint()); + sp->run(); + } } } return eps; diff --git a/src/ripple/server/impl/UDPDoor.h b/src/ripple/server/impl/UDPDoor.h new file mode 100644 index 000000000..60e417d2c --- /dev/null +++ b/src/ripple/server/impl/UDPDoor.h @@ -0,0 +1,284 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_SERVER_UDPDOOR_H_INCLUDED +#define RIPPLE_SERVER_UDPDOOR_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +template +class UDPDoor : public io_list::work, + public std::enable_shared_from_this> +{ +private: + using error_code = boost::system::error_code; + using endpoint_type = boost::asio::ip::tcp::endpoint; + using udp_socket = boost::asio::ip::udp::socket; + + beast::Journal const j_; + Port const& port_; + Handler& handler_; + boost::asio::io_context& ioc_; + boost::asio::strand strand_; + udp_socket socket_; + std::vector recv_buffer_; + endpoint_type local_endpoint_; // Store TCP-style endpoint + +public: + UDPDoor( + Handler& handler, + boost::asio::io_context& io_context, + Port const& port, + beast::Journal j) + : j_(j) + , port_(port) + , handler_(handler) + , ioc_(io_context) + , strand_(io_context.get_executor()) + , socket_(io_context) + , recv_buffer_(port.udp_packet_size) + , local_endpoint_(port.ip, port.port) // Store as TCP endpoint + { + error_code ec; + + // Create UDP endpoint from port configuration + auto const addr = port_.ip.to_v4(); + boost::asio::ip::udp::endpoint udp_endpoint(addr, port_.port); + + socket_.open(boost::asio::ip::udp::v4(), ec); + if (ec) + { + JLOG(j_.error()) << "UDP socket open failed: " << ec.message(); + return; + } + + // Set socket options + socket_.set_option(boost::asio::socket_base::reuse_address(true), ec); + if (ec) + { + JLOG(j_.error()) + << "UDP set reuse_address failed: " << ec.message(); + return; + } + + socket_.bind(udp_endpoint, ec); + if (ec) + { + JLOG(j_.error()) << "UDP socket bind failed: " << ec.message(); + return; + } + + JLOG(j_.info()) << "UDP-RPC listening on " << udp_endpoint; + } + + endpoint_type + get_endpoint() 
const + { + return local_endpoint_; + } + + void + run() + { + if (!socket_.is_open()) + return; + + do_receive(); + } + + void + close() override + { + error_code ec; + socket_.close(ec); + } + +private: + void + do_receive() + { + if (!socket_.is_open()) + return; + + socket_.async_receive_from( + boost::asio::buffer(recv_buffer_), + sender_endpoint_, + boost::asio::bind_executor( + strand_, + std::bind( + &UDPDoor::on_receive, + this->shared_from_this(), + std::placeholders::_1, + std::placeholders::_2))); + } + + void + on_receive(error_code ec, std::size_t bytes_transferred) + { + if (ec) + { + if (ec != boost::asio::error::operation_aborted) + { + JLOG(j_.error()) << "UDP receive failed: " << ec.message(); + do_receive(); + } + return; + } + + // Convert UDP endpoint to TCP endpoint for compatibility + endpoint_type tcp_endpoint( + sender_endpoint_.address(), sender_endpoint_.port()); + + // Handle the received UDP message + handler_.onUDPMessage( + std::string(recv_buffer_.data(), bytes_transferred), + tcp_endpoint, + [this, tcp_endpoint](std::string const& response) { + do_send(response, tcp_endpoint); + }); + + do_receive(); + } + + void + do_send(std::string const& response, endpoint_type const& tcp_endpoint) + { + if (!socket_.is_open()) + { + std::cout << "UDP SOCKET NOT OPEN WHEN SENDING\n\n"; + return; + } + + const size_t HEADER_SIZE = 16; + const size_t MAX_DATAGRAM_SIZE = + 65487; // Allow for ipv6 header 40 bytes + 8 bytes of udp header + const size_t MAX_PAYLOAD_SIZE = MAX_DATAGRAM_SIZE - HEADER_SIZE; + + // Convert TCP endpoint back to UDP for sending + boost::asio::ip::udp::endpoint udp_endpoint( + tcp_endpoint.address(), tcp_endpoint.port()); + + // If message fits in single datagram, send normally + if (response.length() <= MAX_DATAGRAM_SIZE) + { + socket_.async_send_to( + boost::asio::buffer(response), + udp_endpoint, + boost::asio::bind_executor( + strand_, + [this, self = this->shared_from_this()]( + error_code ec, std::size_t 
bytes_transferred) { + if (ec && ec != boost::asio::error::operation_aborted) + { + JLOG(j_.error()) + << "UDP send failed: " << ec.message(); + } + })); + return; + } + + // Calculate number of packets needed + const size_t payload_size = MAX_PAYLOAD_SIZE; + const uint16_t total_packets = + (response.length() + payload_size - 1) / payload_size; + + // Get current timestamp in microseconds + auto now = std::chrono::system_clock::now(); + auto micros = std::chrono::duration_cast( + now.time_since_epoch()) + .count(); + uint64_t timestamp = static_cast(micros); + + // Send fragmented packets + for (uint16_t packet_num = 0; packet_num < total_packets; packet_num++) + { + std::string fragment; + fragment.reserve(MAX_DATAGRAM_SIZE); + + // Add header - 4 bytes of zeros + fragment.push_back(0); + fragment.push_back(0); + fragment.push_back(0); + fragment.push_back(0); + + // Add packet number (little endian) + fragment.push_back(packet_num & 0xFF); + fragment.push_back((packet_num >> 8) & 0xFF); + + // Add total packets (little endian) + fragment.push_back(total_packets & 0xFF); + fragment.push_back((total_packets >> 8) & 0xFF); + + // Add timestamp (8 bytes, little endian) + fragment.push_back(timestamp & 0xFF); + fragment.push_back((timestamp >> 8) & 0xFF); + fragment.push_back((timestamp >> 16) & 0xFF); + fragment.push_back((timestamp >> 24) & 0xFF); + fragment.push_back((timestamp >> 32) & 0xFF); + fragment.push_back((timestamp >> 40) & 0xFF); + fragment.push_back((timestamp >> 48) & 0xFF); + fragment.push_back((timestamp >> 56) & 0xFF); + + // Calculate payload slice + size_t start = packet_num * payload_size; + size_t length = std::min(payload_size, response.length() - start); + fragment.append(response.substr(start, length)); + + socket_.async_send_to( + boost::asio::buffer(fragment), + udp_endpoint, + boost::asio::bind_executor( + strand_, + [this, self = this->shared_from_this()]( + error_code ec, std::size_t bytes_transferred) { + if (ec && ec != 
boost::asio::error::operation_aborted) + { + JLOG(j_.error()) + << "UDP send failed: " << ec.message(); + } + })); + } + } + + boost::asio::ip::udp::endpoint sender_endpoint_; +}; + +} // namespace ripple + +#endif diff --git a/src/test/server/Server_test.cpp b/src/test/server/Server_test.cpp index b5eb71f36..d141b9ebc 100644 --- a/src/test/server/Server_test.cpp +++ b/src/test/server/Server_test.cpp @@ -144,6 +144,14 @@ public: { } + void + onUDPMessage( + std::string const& message, + boost::asio::ip::tcp::endpoint const& remoteEndpoint, + std::function sendResponse) + { + } + void onClose(Session& session, boost::system::error_code const&) { @@ -349,6 +357,14 @@ public: { } + void + onUDPMessage( + std::string const& message, + boost::asio::ip::tcp::endpoint const& remoteEndpoint, + std::function sendResponse) + { + } + void onClose(Session& session, boost::system::error_code const&) { From 542172f0a1ae21cca09444891f4bf8d3cd985148 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ekiserrep=C3=A9?= <126416117+Ekiserrepe@users.noreply.github.com> Date: Mon, 25 Nov 2024 23:52:49 +0100 Subject: [PATCH 03/33] Update README.md (#396) Updated Xaman link. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e73c03461..c9335cfc1 100644 --- a/README.md +++ b/README.md @@ -67,5 +67,5 @@ git-subtree. See those directories' README files for more details. - [explorer.xahau.network](https://explorer.xahau.network) - **Testnet & Faucet**: Test applications and obtain test XAH at [xahau-test.net](https://xahau-test.net) and use the testnet explorer at [explorer.xahau.network](https://explorer.xahau.network). - **Supporting Wallets**: A list of wallets that support XAH and Xahau-based assets. 
- - [Xumm](https://xumm.app) + - [Xaman](https://xaman.app) - [Crossmark](https://crossmark.io) From 9d54da3880b3c256823b80c88a01ba185f6c3733 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Thu, 28 Nov 2024 10:20:44 +0100 Subject: [PATCH 04/33] Fix: failing assert (#397) --- src/ripple/nodestore/impl/Shard.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 8d0eab811..ac4af4782 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -710,10 +710,7 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) if (writeSQLite && !storeSQLite(ledger)) return fail("failed storing to SQLite databases"); - assert( - ledger->info().seq == ledgerSeq && - (ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || - ledger->read(keylet::fees()))); + assert(ledger->info().seq == ledgerSeq && ledger->read(keylet::fees())); hash = ledger->info().parentHash; next = std::move(ledger); From e9468d8b4a9af85ec4a17f1361af46ecb5dafd9e Mon Sep 17 00:00:00 2001 From: RichardAH Date: Wed, 11 Dec 2024 09:38:16 +1000 Subject: [PATCH 05/33] Datagram monitor (#400) Co-authored-by: Denis Angell --- Builds/CMake/RippledCore.cmake | 1 + src/ripple/app/ledger/LedgerMaster.h | 3 + src/ripple/app/ledger/impl/LedgerMaster.cpp | 7 + src/ripple/app/main/Application.cpp | 11 + src/ripple/app/misc/DatagramMonitor.h | 1052 +++++++++++++++++++ src/ripple/app/misc/NetworkOPs.cpp | 166 +-- src/ripple/app/misc/NetworkOPs.h | 41 +- src/ripple/app/misc/StateAccounting.cpp | 49 + src/ripple/app/misc/StateAccounting.h | 99 ++ src/ripple/core/Config.h | 2 + src/ripple/core/ConfigSections.h | 1 + src/ripple/core/impl/Config.cpp | 44 +- 12 files changed, 1294 insertions(+), 182 deletions(-) create mode 100644 src/ripple/app/misc/DatagramMonitor.h create mode 100644 src/ripple/app/misc/StateAccounting.cpp create mode 100644 src/ripple/app/misc/StateAccounting.h diff --git 
a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 5a25d2741..78843991f 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -392,6 +392,7 @@ target_sources (rippled PRIVATE src/ripple/app/misc/NegativeUNLVote.cpp src/ripple/app/misc/NetworkOPs.cpp src/ripple/app/misc/SHAMapStoreImp.cpp + src/ripple/app/misc/StateAccounting.cpp src/ripple/app/misc/detail/impl/WorkSSL.cpp src/ripple/app/misc/impl/AccountTxPaging.cpp src/ripple/app/misc/impl/AmendmentTable.cpp diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index 3d7adc862..040ef3bf6 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -152,6 +152,9 @@ public: std::string getCompleteLedgers(); + RangeSet + getCompleteLedgersRangeSet(); + /** Apply held transactions to the open ledger This is normally called as we close the ledger. The open ledger remains open to handle new transactions diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 844e9da48..4a3301a9c 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -1714,6 +1714,13 @@ LedgerMaster::getCompleteLedgers() return to_string(mCompleteLedgers); } +RangeSet +LedgerMaster::getCompleteLedgersRangeSet() +{ + std::lock_guard sl(mCompleteLock); + return mCompleteLedgers; +} + std::optional LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex) { diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 9134df035..be0a7b46a 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -167,6 +168,8 @@ public: std::unique_ptr logs_; std::unique_ptr timeKeeper_; + std::unique_ptr datagram_monitor_; + std::uint64_t const instanceCookie_; beast::Journal m_journal; @@ -1523,6 
+1526,14 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) if (reportingETL_) reportingETL_->start(); + // Datagram monitor if applicable + if (!config_->standalone() && config_->DATAGRAM_MONITOR != "") + { + datagram_monitor_ = std::make_unique(*this); + if (datagram_monitor_) + datagram_monitor_->start(); + } + return true; } diff --git a/src/ripple/app/misc/DatagramMonitor.h b/src/ripple/app/misc/DatagramMonitor.h new file mode 100644 index 000000000..ba6ce0213 --- /dev/null +++ b/src/ripple/app/misc/DatagramMonitor.h @@ -0,0 +1,1052 @@ +// +#ifndef RIPPLE_APP_MAIN_DATAGRAMMONITOR_H_INCLUDED +#define RIPPLE_APP_MAIN_DATAGRAMMONITOR_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(__linux__) +#include +#include +#elif defined(__APPLE__) +#include +#include +#include +#include +#include +#include +#include +#include +#endif +#include +#include + +namespace ripple { + +// Magic number for server info packets: 'XDGM' (le) Xahau DataGram Monitor +constexpr uint32_t SERVER_INFO_MAGIC = 0x4D474458; +constexpr uint32_t SERVER_INFO_VERSION = 1; + +// Warning flag bits +constexpr uint32_t WARNING_AMENDMENT_BLOCKED = 1 << 0; +constexpr uint32_t WARNING_UNL_BLOCKED = 1 << 1; +constexpr uint32_t WARNING_AMENDMENT_WARNED = 1 << 2; +constexpr uint32_t WARNING_NOT_SYNCED = 1 << 3; + +// Time window statistics for rates +struct [[gnu::packed]] MetricRates +{ + double rate_1m; // Average rate over last minute + double rate_5m; // Average rate over last 5 minutes + double rate_1h; // Average rate over last hour + double rate_24h; // Average rate over last 24 hours +}; + +struct AllRates +{ + MetricRates network_in; + MetricRates network_out; + MetricRates disk_read; + MetricRates disk_write; +}; + 
+// Structure to represent a ledger sequence range +struct [[gnu::packed]] LgrRange +{ + uint32_t start; + uint32_t end; +}; + +// Map is returned separately since variable-length data +// shouldn't be included in network structures +using ObjectCountMap = std::vector, int>>; + +struct [[gnu::packed]] DebugCounters +{ + // Database metrics + std::uint64_t dbKBTotal{0}; + std::uint64_t dbKBLedger{0}; + std::uint64_t dbKBTransaction{0}; + std::uint64_t localTxCount{0}; + + // Basic metrics + std::uint32_t writeLoad{0}; + std::int32_t historicalPerMinute{0}; + + // Cache metrics + std::uint32_t sleHitRate{0}; // Stored as fixed point, multiplied by 1000 + std::uint32_t ledgerHitRate{ + 0}; // Stored as fixed point, multiplied by 1000 + std::uint32_t alSize{0}; + std::uint32_t alHitRate{0}; // Stored as fixed point, multiplied by 1000 + std::int32_t fullbelowSize{0}; + std::uint32_t treenodeCacheSize{0}; + std::uint32_t treenodeTrackSize{0}; + + // Shard metrics + std::int32_t shardFullbelowSize{0}; + std::uint32_t shardTreenodeCacheSize{0}; + std::uint32_t shardTreenodeTrackSize{0}; + std::uint32_t shardWriteLoad{0}; + std::uint64_t shardNodeWrites{0}; + std::uint64_t shardNodeReadsTotal{0}; + std::uint64_t shardNodeReadsHit{0}; + std::uint64_t shardNodeWrittenBytes{0}; + std::uint64_t shardNodeReadBytes{0}; + + // Node store metrics (when not using shards) + std::uint64_t nodeWriteCount{0}; + std::uint64_t nodeWriteSize{0}; + std::uint64_t nodeFetchCount{0}; + std::uint64_t nodeFetchHitCount{0}; + std::uint64_t nodeFetchSize{0}; +}; + +// Core server metrics in the fixed header +struct [[gnu::packed]] ServerInfoHeader +{ + // Fixed header fields come first + uint32_t magic; // Magic number to identify packet type + uint32_t version; // Protocol version number + uint32_t network_id; // Network ID from config + uint32_t server_state; // Operating mode as enum + uint32_t peer_count; // Number of connected peers + uint32_t node_size; // Size category (0=tiny through 
4=huge) + uint32_t cpu_cores; // CPU core count + uint32_t ledger_range_count; // Number of range entries + uint32_t warning_flags; // Warning flags (reduced size) + + uint32_t padding_1; // padding for alignment + + // 64-bit metrics + uint64_t timestamp; // System time in microseconds + uint64_t uptime; // Server uptime in seconds + uint64_t io_latency_us; // IO latency in microseconds + uint64_t validation_quorum; // Validation quorum count + uint64_t fetch_pack_size; // Size of fetch pack cache + uint64_t proposer_count; // Number of proposers in last close + uint64_t converge_time_ms; // Last convergence time in ms + uint64_t load_factor; // Load factor (scaled by 1M) + uint64_t load_base; // Load base value + uint64_t reserve_base; // Reserve base amount + uint64_t reserve_inc; // Reserve increment amount + uint64_t ledger_seq; // Latest ledger sequence + + // Fixed-size byte arrays + uint8_t ledger_hash[32]; // Latest ledger hash + uint8_t node_public_key[33]; // Node's public key + uint8_t padding2[7]; // Padding to maintain 8-byte alignment + uint8_t version_string[32]; + + // System metrics + uint64_t process_memory_pages; // Process memory usage in bytes + uint64_t system_memory_total; // Total system memory in bytes + uint64_t system_memory_free; // Free system memory in bytes + uint64_t system_memory_used; // Used system memory in bytes + uint64_t system_disk_total; // Total disk space in bytes + uint64_t system_disk_free; // Free disk space in bytes + uint64_t system_disk_used; // Used disk space in bytes + uint64_t io_wait_time; // IO wait time in milliseconds + double load_avg_1min; // 1 minute load average + double load_avg_5min; // 5 minute load average + double load_avg_15min; // 15 minute load average + + // State transition metrics + uint64_t state_transitions[5]; // Count for each operating mode + uint64_t state_durations[5]; // Duration in each mode + uint64_t initial_sync_us; // Initial sync duration + + // Network and disk rates remain 
unchanged + struct + { + MetricRates network_in; + MetricRates network_out; + MetricRates disk_read; + MetricRates disk_write; + } rates; + + DebugCounters dbg_counters; +}; + +// System metrics collected for rate calculations +struct SystemMetrics +{ + uint64_t timestamp; // When metrics were collected + uint64_t network_bytes_in; // Current total bytes in + uint64_t network_bytes_out; // Current total bytes out + uint64_t disk_bytes_read; // Current total bytes read + uint64_t disk_bytes_written; // Current total bytes written +}; + +class MetricsTracker +{ +private: + static constexpr size_t SAMPLES_1M = 60; // 1 sample/second for 1 minute + static constexpr size_t SAMPLES_5M = 300; // 1 sample/second for 5 minutes + static constexpr size_t SAMPLES_1H = 3600; // 1 sample/second for 1 hour + static constexpr size_t SAMPLES_24H = 1440; // 1 sample/minute for 24 hours + + std::vector samples_1m{SAMPLES_1M}; + std::vector samples_5m{SAMPLES_5M}; + std::vector samples_1h{SAMPLES_1H}; + std::vector samples_24h{SAMPLES_24H}; + + size_t index_1m{0}, index_5m{0}, index_1h{0}, index_24h{0}; + std::chrono::system_clock::time_point last_24h_sample{}; + + double + calculateRate( + const SystemMetrics& current, + const std::vector& samples, + size_t current_index, + size_t max_samples, + bool is_24h_window, + std::function metric_getter) + { + // If we don't have at least 2 samples, the rate is 0 + if (current_index < 2) + { + return 0.0; + } + + // Calculate time window based on the window type + uint64_t expected_window_micros; + if (is_24h_window) + { + expected_window_micros = + 24ULL * 60ULL * 60ULL * 1000000ULL; // 24 hours in microseconds + } + else + { + expected_window_micros = max_samples * + 1000000ULL; // window in seconds * 1,000,000 for microseconds + } + + // For any window where we don't have full data, we should scale the + // rate based on the actual time we have data for + uint64_t actual_window_micros = + current.timestamp - samples[0].timestamp; + double 
window_scale = std::min( + 1.0, + static_cast(actual_window_micros) / expected_window_micros); + + // Get the oldest valid sample + size_t oldest_index = (current_index >= max_samples) + ? ((current_index + 1) % max_samples) + : 0; + const auto& oldest = samples[oldest_index]; + + double elapsed = actual_window_micros / + 1000000.0; // Convert microseconds to seconds + + // Ensure we have a meaningful time difference + if (elapsed < 0.001) + { // Less than 1ms difference + return 0.0; + } + + uint64_t current_value = metric_getter(current); + uint64_t oldest_value = metric_getter(oldest); + + // Handle counter wraparound + uint64_t diff = (current_value >= oldest_value) + ? (current_value - oldest_value) + : (std::numeric_limits::max() - oldest_value + + current_value + 1); + + // Calculate the rate and scale it based on our window coverage + return (static_cast(diff) / elapsed) * window_scale; + } + + MetricRates + calculateMetricRates( + const SystemMetrics& current, + std::function metric_getter) + { + MetricRates rates; + rates.rate_1m = calculateRate( + current, samples_1m, index_1m, SAMPLES_1M, false, metric_getter); + rates.rate_5m = calculateRate( + current, samples_5m, index_5m, SAMPLES_5M, false, metric_getter); + rates.rate_1h = calculateRate( + current, samples_1h, index_1h, SAMPLES_1H, false, metric_getter); + rates.rate_24h = calculateRate( + current, samples_24h, index_24h, SAMPLES_24H, true, metric_getter); + return rates; + } + +public: + void + addSample(const SystemMetrics& metrics) + { + auto now = std::chrono::system_clock::now(); + + // Update 1-minute window (every second) + samples_1m[index_1m++ % SAMPLES_1M] = metrics; + + // Update 5-minute window (every second) + samples_5m[index_5m++ % SAMPLES_5M] = metrics; + + // Update 1-hour window (every second) + samples_1h[index_1h++ % SAMPLES_1H] = metrics; + + // Update 24-hour window (every minute) + if (last_24h_sample + std::chrono::minutes(1) <= now) + { + samples_24h[index_24h++ % 
SAMPLES_24H] = metrics; + last_24h_sample = now; + } + } + + AllRates + getRates(const SystemMetrics& current) + { + AllRates rates; + rates.network_in = calculateMetricRates( + current, [](const SystemMetrics& m) { return m.network_bytes_in; }); + rates.network_out = calculateMetricRates( + current, + [](const SystemMetrics& m) { return m.network_bytes_out; }); + rates.disk_read = calculateMetricRates( + current, [](const SystemMetrics& m) { return m.disk_bytes_read; }); + rates.disk_write = calculateMetricRates( + current, + [](const SystemMetrics& m) { return m.disk_bytes_written; }); + return rates; + } +}; + +class DatagramMonitor +{ +private: + Application& app_; + std::atomic running_{false}; + std::thread monitor_thread_; + MetricsTracker metrics_tracker_; + + struct EndpointInfo + { + std::string ip; + uint16_t port; + bool is_ipv6; + }; + EndpointInfo + parseEndpoint(std::string const& endpoint) + { + auto space_pos = endpoint.find(' '); + if (space_pos == std::string::npos) + throw std::runtime_error("Invalid endpoint format"); + + EndpointInfo info; + info.ip = endpoint.substr(0, space_pos); + info.port = std::stoi(endpoint.substr(space_pos + 1)); + info.is_ipv6 = info.ip.find(':') != std::string::npos; + return info; + } + + int + createSocket(EndpointInfo const& endpoint) + { + int sock = socket(endpoint.is_ipv6 ? 
AF_INET6 : AF_INET, SOCK_DGRAM, 0); + if (sock < 0) + throw std::runtime_error("Failed to create socket"); + return sock; + } + + void + sendPacket( + int sock, + EndpointInfo const& endpoint, + std::vector const& buffer) + { + struct sockaddr_storage addr; + socklen_t addr_len; + + if (endpoint.is_ipv6) + { + struct sockaddr_in6* addr6 = + reinterpret_cast(&addr); + addr6->sin6_family = AF_INET6; + addr6->sin6_port = htons(endpoint.port); + inet_pton(AF_INET6, endpoint.ip.c_str(), &addr6->sin6_addr); + addr_len = sizeof(struct sockaddr_in6); + } + else + { + struct sockaddr_in* addr4 = + reinterpret_cast(&addr); + addr4->sin_family = AF_INET; + addr4->sin_port = htons(endpoint.port); + inet_pton(AF_INET, endpoint.ip.c_str(), &addr4->sin_addr); + addr_len = sizeof(struct sockaddr_in); + } + + sendto( + sock, + buffer.data(), + buffer.size(), + 0, + reinterpret_cast(&addr), + addr_len); + } + + // Returns both the counters and object count map separately + std::pair + getDebugCounters() + { + DebugCounters counters; + ObjectCountMap objectCounts = + CountedObjects::getInstance().getCounts(1); + + // Database metrics if applicable + if (!app_.config().reporting() && app_.config().useTxTables()) + { + auto const db = + dynamic_cast(&app_.getRelationalDatabase()); + if (!db) + Throw("Failed to get relational database"); + + if (auto dbKB = db->getKBUsedAll()) + counters.dbKBTotal = dbKB; + if (auto dbKB = db->getKBUsedLedger()) + counters.dbKBLedger = dbKB; + if (auto dbKB = db->getKBUsedTransaction()) + counters.dbKBTransaction = dbKB; + if (auto count = app_.getOPs().getLocalTxCount()) + counters.localTxCount = count; + } + + // Basic metrics + counters.writeLoad = app_.getNodeStore().getWriteLoad(); + counters.historicalPerMinute = + static_cast(app_.getInboundLedgers().fetchRate()); + + // Cache metrics - convert floating point rates to fixed point + counters.sleHitRate = + static_cast(app_.cachedSLEs().rate() * 1000); + counters.ledgerHitRate = static_cast( + 
app_.getLedgerMaster().getCacheHitRate() * 1000); + counters.alSize = app_.getAcceptedLedgerCache().size(); + counters.alHitRate = static_cast( + app_.getAcceptedLedgerCache().getHitRate() * 1000); + counters.fullbelowSize = static_cast( + app_.getNodeFamily().getFullBelowCache(0)->size()); + counters.treenodeCacheSize = + app_.getNodeFamily().getTreeNodeCache(0)->getCacheSize(); + counters.treenodeTrackSize = + app_.getNodeFamily().getTreeNodeCache(0)->getTrackSize(); + + // Handle shard metrics if available + if (auto shardStore = app_.getShardStore()) + { + auto shardFamily = + dynamic_cast(app_.getShardFamily()); + auto const [cacheSz, trackSz] = shardFamily->getTreeNodeCacheSize(); + + counters.shardFullbelowSize = shardFamily->getFullBelowCacheSize(); + counters.shardTreenodeCacheSize = cacheSz; + counters.shardTreenodeTrackSize = trackSz; + counters.shardWriteLoad = shardStore->getWriteLoad(); + counters.shardNodeWrites = shardStore->getStoreCount(); + counters.shardNodeReadsTotal = shardStore->getFetchTotalCount(); + counters.shardNodeReadsHit = shardStore->getFetchHitCount(); + counters.shardNodeWrittenBytes = shardStore->getStoreSize(); + counters.shardNodeReadBytes = shardStore->getFetchSize(); + } + else + { + // Get regular node store metrics + counters.nodeWriteCount = app_.getNodeStore().getStoreCount(); + counters.nodeWriteSize = app_.getNodeStore().getStoreSize(); + counters.nodeFetchCount = app_.getNodeStore().getFetchTotalCount(); + counters.nodeFetchHitCount = app_.getNodeStore().getFetchHitCount(); + counters.nodeFetchSize = app_.getNodeStore().getFetchSize(); + } + + return {counters, objectCounts}; + } + + uint32_t + getPhysicalCPUCount() + { + static uint32_t count = 0; + if (count > 0) + return count; + +#if defined(__linux__) + try + { + std::ifstream cpuinfo("/proc/cpuinfo"); + if (!cpuinfo) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Unable to open file: /proc/cpuinfo"; + return count; + } + std::string line; + std::set 
physical_ids; + std::string current_physical_id; + + while (std::getline(cpuinfo, line)) + { + if (line.find("core id") != std::string::npos) + { + current_physical_id = line.substr(line.find(":") + 1); + // Trim whitespace + current_physical_id.erase( + 0, current_physical_id.find_first_not_of(" \t")); + current_physical_id.erase( + current_physical_id.find_last_not_of(" \t") + 1); + physical_ids.insert(current_physical_id); + } + } + + count = physical_ids.size(); + } + catch (const std::exception& e) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Error getting CPU count: " << e.what(); + } + + // Return at least 1 if we couldn't determine the count + return count > 0 ? count : (count = 1); +#elif defined(__APPLE__) + int value = 0; + size_t size = sizeof(value); + if (sysctlbyname("hw.physicalcpu", &value, &size, NULL, 0) == 0) + count = value; + return count > 0 ? count : (count = 1); +#endif + } + + SystemMetrics + collectSystemMetrics() + { + SystemMetrics metrics{}; + metrics.timestamp = + std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + +#if defined(__linux__) + // Network stats collection + try + { + std::ifstream net_file("/proc/net/dev"); + if (!net_file) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Unable to open file /proc/net/dev"; + return metrics; + } + + std::string line; + uint64_t total_bytes_in = 0, total_bytes_out = 0; + + // Skip header lines + std::getline(net_file, line); // Inter-| Receive... + std::getline(net_file, line); // face |bytes... 
+ + while (std::getline(net_file, line)) + { + if (line.find(':') != std::string::npos) + { + std::string interface = line.substr(0, line.find(':')); + interface = + interface.substr(interface.find_first_not_of(" \t")); + interface = interface.substr( + 0, interface.find_last_not_of(" \t") + 1); + + // Skip loopback interface + if (interface == "lo") + continue; + + uint64_t bytes_in, bytes_out; + std::istringstream iss(line.substr(line.find(':') + 1)); + iss >> bytes_in; // First field after : is bytes_in + for (int i = 0; i < 8; ++i) + iss >> std::ws; // Skip 8 fields + iss >> bytes_out; // 9th field is bytes_out + + total_bytes_in += bytes_in; + total_bytes_out += bytes_out; + } + } + metrics.network_bytes_in = total_bytes_in; + metrics.network_bytes_out = total_bytes_out; + } + catch (const std::exception& e) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Error collecting network stats: " << e.what(); + } + + // Disk stats collection + try + { + std::ifstream disk_file("/proc/diskstats"); + if (!disk_file) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Unable to open file: /proc/diskstats"; + return metrics; + } + std::string line; + uint64_t total_bytes_read = 0, total_bytes_written = 0; + + while (std::getline(disk_file, line)) + { + unsigned int major, minor; + char dev_name[32]; + uint64_t reads, read_sectors, writes, write_sectors; + + if (sscanf( + line.c_str(), + "%u %u %31s %lu %*u %lu %*u %lu %*u %lu", + &major, + &minor, + dev_name, + &reads, + &read_sectors, + &writes, + &write_sectors) == 7) + { + // Only process physical devices + std::string device_name(dev_name); + if (device_name.substr(0, 3) == "dm-" || + device_name.substr(0, 4) == "loop" || + device_name.substr(0, 3) == "ram") + { + continue; + } + + // Skip partitions (usually have a number at the end) + if (std::isdigit(device_name.back())) + { + continue; + } + + uint64_t bytes_read = read_sectors * 512; + uint64_t bytes_written = write_sectors * 512; + + 
total_bytes_read += bytes_read; + total_bytes_written += bytes_written; + } + } + metrics.disk_bytes_read = total_bytes_read; + metrics.disk_bytes_written = total_bytes_written; + } + catch (const std::exception& e) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Error collecting disk stats: " << e.what(); + } +#elif defined(__APPLE__) + // Network stats collection + try + { + struct ifaddrs* ifap; + if (getifaddrs(&ifap) == 0) + { + uint64_t total_bytes_in = 0, total_bytes_out = 0; + for (struct ifaddrs* ifa = ifap; ifa; ifa = ifa->ifa_next) + { + if (ifa->ifa_addr != NULL && + ifa->ifa_addr->sa_family == AF_LINK) + { + struct if_data* ifd = (struct if_data*)ifa->ifa_data; + if (ifd != NULL) + { + // Skip loopback interface + if (strcmp(ifa->ifa_name, "lo0") == 0) + continue; + + total_bytes_in += ifd->ifi_ibytes; + total_bytes_out += ifd->ifi_obytes; + } + } + } + freeifaddrs(ifap); + + metrics.network_bytes_in = total_bytes_in; + metrics.network_bytes_out = total_bytes_out; + } + } + catch (const std::exception& e) + { + JLOG(app_.journal("DatagramMonitor").error()) + << "Error collecting network stats: " << e.what(); + } + + // Disk stats collection + // Disk IO stats are not easily accessible in macOS. + // We'll set these values to zero for now. 
+ metrics.disk_bytes_read = 0; + metrics.disk_bytes_written = 0; +#endif + return metrics; + } + + std::vector + generateServerInfo() + { + auto& ops = app_.getOPs(); + + auto [dbg_counters, obj_count_map] = getDebugCounters(); + + // Get the RangeSet directly + auto rangeSet = app_.getLedgerMaster().getCompleteLedgersRangeSet(); + auto currentMetrics = collectSystemMetrics(); + metrics_tracker_.addSample(currentMetrics); + + // Count only non-zero intervals and calculate total size needed + size_t validRangeCount = 0; + for (auto const& interval : rangeSet) + { + // Skip intervals where both lower and upper are 0 + if (interval.lower() != 0 || interval.upper() != 0) + { + validRangeCount++; + } + } + + size_t totalSize = sizeof(ServerInfoHeader) + + (validRangeCount * sizeof(LgrRange)) + (64 * obj_count_map.size()); + + // Allocate buffer and initialize header + std::vector buffer(totalSize); + auto* header = reinterpret_cast(buffer.data()); + memset(header, 0, sizeof(ServerInfoHeader)); + + // Set magic number and version + header->magic = SERVER_INFO_MAGIC; + header->version = SERVER_INFO_VERSION; + header->network_id = app_.config().NETWORK_ID; + header->timestamp = + std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + header->uptime = UptimeClock::now().time_since_epoch().count(); + header->io_latency_us = app_.getIOLatency().count(); + header->validation_quorum = app_.validators().quorum(); + + if (!app_.config().reporting()) + header->peer_count = app_.overlay().size(); + + header->node_size = app_.config().NODE_SIZE; + + // Get state accounting data + auto const [counters, mode, start, initialSync] = + app_.getOPs().getStateAccountingData(); + + // Pack state metrics into header + for (size_t i = 0; i < 5; ++i) + { + header->state_transitions[i] = counters[i].transitions; + header->state_durations[i] = counters[i].dur.count(); + } + header->initial_sync_us = initialSync; + + // Pack warning flags + if 
(ops.isAmendmentBlocked()) + header->warning_flags |= WARNING_AMENDMENT_BLOCKED; + if (ops.isUNLBlocked()) + header->warning_flags |= WARNING_UNL_BLOCKED; + if (ops.isAmendmentWarned()) + header->warning_flags |= WARNING_AMENDMENT_WARNED; + + // Pack consensus info + auto& mConsensus = ops.getConsensus(); + header->proposer_count = mConsensus.prevProposers(); + header->converge_time_ms = mConsensus.prevRoundTime().count(); + + // Pack fetch pack size if present + auto& ledgerMaster = ops.getLedgerMaster(); + auto const lastClosed = ledgerMaster.getClosedLedger(); + auto const validated = ledgerMaster.getValidatedLedger(); + + if (lastClosed && validated) + { + auto consensus = + ledgerMaster.getLedgerByHash(lastClosed->info().hash); + if (!consensus) + consensus = app_.getInboundLedgers().acquire( + lastClosed->info().hash, + 0, + InboundLedger::Reason::CONSENSUS); + + if (consensus && + (!ledgerMaster.canBeCurrent(consensus) || + !ledgerMaster.isCompatible( + *consensus, + app_.journal("DatagramMonitor").debug(), + "Not switching"))) + { + header->warning_flags |= WARNING_NOT_SYNCED; + } + } + else + { + // If we don't have both lastClosed and validated ledgers, we're + // definitely not synced + header->warning_flags |= WARNING_NOT_SYNCED; + } + + auto const fp = ledgerMaster.getFetchPackCacheSize(); + if (fp != 0) + header->fetch_pack_size = fp; + + // Pack load factor info if not reporting + if (!app_.config().reporting()) + { + auto const escalationMetrics = + app_.getTxQ().getMetrics(*app_.openLedger().current()); + auto const loadFactorServer = app_.getFeeTrack().getLoadFactor(); + auto const loadBaseServer = app_.getFeeTrack().getLoadBase(); + auto const loadFactorFeeEscalation = + mulDiv( + escalationMetrics.openLedgerFeeLevel, + loadBaseServer, + escalationMetrics.referenceFeeLevel) + .second; + + header->load_factor = std::max( + safe_cast(loadFactorServer), + loadFactorFeeEscalation); + header->load_base = loadBaseServer; + } + +#if defined(__linux__) + 
// Get system info using sysinfo + struct sysinfo si; + if (sysinfo(&si) == 0) + { + header->system_memory_total = si.totalram * si.mem_unit; + header->system_memory_free = si.freeram * si.mem_unit; + header->system_memory_used = + header->system_memory_total - header->system_memory_free; + header->load_avg_1min = si.loads[0] / (float)(1 << SI_LOAD_SHIFT); + header->load_avg_5min = si.loads[1] / (float)(1 << SI_LOAD_SHIFT); + header->load_avg_15min = si.loads[2] / (float)(1 << SI_LOAD_SHIFT); + } +#elif defined(__APPLE__) + // Get total physical memory + int64_t physical_memory; + size_t length = sizeof(physical_memory); + if (sysctlbyname("hw.memsize", &physical_memory, &length, NULL, 0) == 0) + { + header->system_memory_total = physical_memory; + } + + // Get free and used memory + vm_statistics_data_t vm_stats; + mach_msg_type_number_t count = HOST_VM_INFO_COUNT; + if (host_statistics( + mach_host_self(), + HOST_VM_INFO, + (host_info_t)&vm_stats, + &count) == KERN_SUCCESS) + { + uint64_t page_size; + length = sizeof(page_size); + sysctlbyname("hw.pagesize", &page_size, &length, NULL, 0); + + header->system_memory_free = + (uint64_t)vm_stats.free_count * page_size; + header->system_memory_used = + header->system_memory_total - header->system_memory_free; + } + + // Get load averages + double loadavg[3]; + if (getloadavg(loadavg, 3) == 3) + { + header->load_avg_1min = loadavg[0]; + header->load_avg_5min = loadavg[1]; + header->load_avg_15min = loadavg[2]; + } +#endif + + // Get process memory usage + struct rusage usage; + getrusage(RUSAGE_SELF, &usage); + header->process_memory_pages = usage.ru_maxrss; + + // Get disk usage +#if defined(__linux__) + struct statvfs fs; + if (statvfs("/", &fs) == 0) + { + header->system_disk_total = fs.f_blocks * fs.f_frsize; + header->system_disk_free = fs.f_bfree * fs.f_frsize; + header->system_disk_used = + header->system_disk_total - header->system_disk_free; + } +#elif defined(__APPLE__) + struct statfs fs; + if (statfs("/", 
&fs) == 0) + { + header->system_disk_total = fs.f_blocks * fs.f_bsize; + header->system_disk_free = fs.f_bfree * fs.f_bsize; + header->system_disk_used = + header->system_disk_total - header->system_disk_free; + } +#endif + + // Get CPU core count + header->cpu_cores = getPhysicalCPUCount(); + + // Get rate statistics + auto rates = metrics_tracker_.getRates(currentMetrics); + header->rates.network_in = rates.network_in; + header->rates.network_out = rates.network_out; + header->rates.disk_read = rates.disk_read; + header->rates.disk_write = rates.disk_write; + + // Pack ledger info and ranges + auto lpClosed = ledgerMaster.getValidatedLedger(); + if (!lpClosed && !app_.config().reporting()) + lpClosed = ledgerMaster.getClosedLedger(); + + if (lpClosed) + { + header->ledger_seq = lpClosed->info().seq; + auto const& hash = lpClosed->info().hash; + std::memcpy(header->ledger_hash, hash.data(), 32); + header->reserve_base = lpClosed->fees().accountReserve(0).drops(); + header->reserve_inc = lpClosed->fees().increment.drops(); + } + + // Pack node public key + auto const& nodeKey = app_.nodeIdentity().first; + std::memcpy(header->node_public_key, nodeKey.data(), 33); + + // Pack version string + memset(&header->version_string, 0, 32); + memcpy( + &header->version_string, + BuildInfo::getVersionString().c_str(), + BuildInfo::getVersionString().size() > 32 + ? 
32 + : BuildInfo::getVersionString().size()); + + header->dbg_counters = dbg_counters; + + // Set the complete ledger count + header->ledger_range_count = validRangeCount; + + // Append only non-zero ranges after the header + auto* rangeData = reinterpret_cast( + buffer.data() + sizeof(ServerInfoHeader)); + size_t i = 0; + for (auto const& interval : rangeSet) + { + // Only pack non-zero ranges + if (interval.lower() != 0 || interval.upper() != 0) + { + rangeData[i].start = interval.lower(); + rangeData[i].end = interval.upper(); + ++i; + } + } + + uint8_t* end_of_ranges = reinterpret_cast(buffer.data()) + + sizeof(ServerInfoHeader) + (validRangeCount * sizeof(LgrRange)); + + memset(end_of_ranges, 0, 64 * obj_count_map.size()); + + uint8_t* ptr = end_of_ranges; + for (auto& [name, val] : obj_count_map) + { + size_t to_write = name.size() > 56 ? 56 : name.size(); + memcpy(ptr, name.c_str(), to_write); + ptr += 56; + *reinterpret_cast(ptr) = val; + ptr += 8; + } + + return buffer; + } + void + monitorThread() + { + auto endpoint = parseEndpoint(app_.config().DATAGRAM_MONITOR); + int sock = createSocket(endpoint); + + while (running_) + { + try + { + auto info = generateServerInfo(); + sendPacket(sock, endpoint, info); + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + catch (const std::exception& e) + { + // Log error but continue monitoring + JLOG(app_.journal("DatagramMonitor").error()) + << "Server info monitor error: " << e.what(); + } + } + + close(sock); + } + +public: + DatagramMonitor(Application& app) : app_(app) + { + } + + void + start() + { + if (!running_.exchange(true)) + { + monitor_thread_ = + std::thread(&DatagramMonitor::monitorThread, this); + } + } + + void + stop() + { + if (running_.exchange(false)) + { + if (monitor_thread_.joinable()) + monitor_thread_.join(); + } + } + + ~DatagramMonitor() + { + stop(); + } +}; +} // namespace ripple +#endif diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 
637f7725e..df1b1ba08 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -70,7 +71,6 @@ #include #include #include -#include #include #include #include @@ -117,81 +117,6 @@ class NetworkOPsImp final : public NetworkOPs running, }; - static std::array const states_; - - /** - * State accounting records two attributes for each possible server state: - * 1) Amount of time spent in each state (in microseconds). This value is - * updated upon each state transition. - * 2) Number of transitions to each state. - * - * This data can be polled through server_info and represented by - * monitoring systems similarly to how bandwidth, CPU, and other - * counter-based metrics are managed. - * - * State accounting is more accurate than periodic sampling of server - * state. With periodic sampling, it is very likely that state transitions - * are missed, and accuracy of time spent in each state is very rough. - */ - class StateAccounting - { - struct Counters - { - explicit Counters() = default; - - std::uint64_t transitions = 0; - std::chrono::microseconds dur = std::chrono::microseconds(0); - }; - - OperatingMode mode_ = OperatingMode::DISCONNECTED; - std::array counters_; - mutable std::mutex mutex_; - std::chrono::steady_clock::time_point start_ = - std::chrono::steady_clock::now(); - std::chrono::steady_clock::time_point const processStart_ = start_; - std::uint64_t initialSyncUs_{0}; - static std::array const states_; - - public: - explicit StateAccounting() - { - counters_[static_cast(OperatingMode::DISCONNECTED)] - .transitions = 1; - } - - /** - * Record state transition. Update duration spent in previous - * state. - * - * @param om New state. - */ - void - mode(OperatingMode om); - - /** - * Output state counters in JSON format. - * - * @obj Json object to which to add state accounting data. 
- */ - void - json(Json::Value& obj) const; - - struct CounterData - { - decltype(counters_) counters; - decltype(mode_) mode; - decltype(start_) start; - decltype(initialSyncUs_) initialSyncUs; - }; - - CounterData - getCounterData() const - { - std::lock_guard lock(mutex_); - return {counters_, mode_, start_, initialSyncUs_}; - } - }; - //! Server fees published on `server` subscription struct ServerFeeSummary { @@ -273,6 +198,9 @@ public: std::string strOperatingMode(bool const admin = false) const override; + StateAccounting::CounterData + getStateAccountingData(); + // // Transaction operations. // @@ -777,11 +705,17 @@ private: DispatchState mDispatchState = DispatchState::none; std::vector mTransactions; - StateAccounting accounting_{}; + StateAccounting accounting_; std::set pendingValidations_; std::mutex validationsMutex_; + RCLConsensus& + getConsensus(); + + LedgerMaster& + getLedgerMaster(); + private: struct Stats { @@ -844,19 +778,6 @@ private: //------------------------------------------------------------------------------ -static std::array const stateNames{ - {"disconnected", "connected", "syncing", "tracking", "full"}}; - -std::array const NetworkOPsImp::states_ = stateNames; - -std::array const - NetworkOPsImp::StateAccounting::states_ = { - {Json::StaticString(stateNames[0]), - Json::StaticString(stateNames[1]), - Json::StaticString(stateNames[2]), - Json::StaticString(stateNames[3]), - Json::StaticString(stateNames[4])}}; - static auto const genesisAccountId = calcAccountID( generateKeyPair(KeyType::secp256k1, generateSeed("masterpassphrase")) .first); @@ -1131,7 +1052,7 @@ NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin) } } - return states_[static_cast(mode)]; + return {StateAccounting::states_[static_cast(mode)].c_str()}; } void @@ -2397,6 +2318,19 @@ NetworkOPsImp::getConsensusInfo() return mConsensus.getJson(true); } +// RHTODO: not threadsafe? 
+RCLConsensus& +NetworkOPsImp::getConsensus() +{ + return mConsensus; +} + +LedgerMaster& +NetworkOPsImp::getLedgerMaster() +{ + return m_ledgerMaster; +} + Json::Value NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) { @@ -4194,6 +4128,12 @@ NetworkOPsImp::stateAccounting(Json::Value& obj) accounting_.json(obj); } +StateAccounting::CounterData +NetworkOPsImp::getStateAccountingData() +{ + return accounting_.getCounterData(); +} + // <-- bool: true=erased, false=was not there bool NetworkOPsImp::unsubValidations(std::uint64_t uSeq) @@ -4664,50 +4604,6 @@ NetworkOPsImp::collect_metrics() counters[static_cast(OperatingMode::FULL)].transitions); } -void -NetworkOPsImp::StateAccounting::mode(OperatingMode om) -{ - auto now = std::chrono::steady_clock::now(); - - std::lock_guard lock(mutex_); - ++counters_[static_cast(om)].transitions; - if (om == OperatingMode::FULL && - counters_[static_cast(om)].transitions == 1) - { - initialSyncUs_ = std::chrono::duration_cast( - now - processStart_) - .count(); - } - counters_[static_cast(mode_)].dur += - std::chrono::duration_cast(now - start_); - - mode_ = om; - start_ = now; -} - -void -NetworkOPsImp::StateAccounting::json(Json::Value& obj) const -{ - auto [counters, mode, start, initialSync] = getCounterData(); - auto const current = std::chrono::duration_cast( - std::chrono::steady_clock::now() - start); - counters[static_cast(mode)].dur += current; - - obj[jss::state_accounting] = Json::objectValue; - for (std::size_t i = static_cast(OperatingMode::DISCONNECTED); - i <= static_cast(OperatingMode::FULL); - ++i) - { - obj[jss::state_accounting][states_[i]] = Json::objectValue; - auto& state = obj[jss::state_accounting][states_[i]]; - state[jss::transitions] = std::to_string(counters[i].transitions); - state[jss::duration_us] = std::to_string(counters[i].dur.count()); - } - obj[jss::server_state_duration_us] = std::to_string(current.count()); - if (initialSync) - obj[jss::initial_sync_duration_us] = 
std::to_string(initialSync); -} - //------------------------------------------------------------------------------ std::unique_ptr diff --git a/src/ripple/app/misc/NetworkOPs.h b/src/ripple/app/misc/NetworkOPs.h index d53127ed3..350542404 100644 --- a/src/ripple/app/misc/NetworkOPs.h +++ b/src/ripple/app/misc/NetworkOPs.h @@ -20,8 +20,10 @@ #ifndef RIPPLE_APP_MISC_NETWORKOPS_H_INCLUDED #define RIPPLE_APP_MISC_NETWORKOPS_H_INCLUDED +#include #include #include +#include #include #include #include @@ -42,35 +44,6 @@ class LedgerMaster; class Transaction; class ValidatorKeys; -// This is the primary interface into the "client" portion of the program. -// Code that wants to do normal operations on the network such as -// creating and monitoring accounts, creating transactions, and so on -// should use this interface. The RPC code will primarily be a light wrapper -// over this code. -// -// Eventually, it will check the node's operating mode (synched, unsynched, -// etectera) and defer to the correct means of processing. The current -// code assumes this node is synched (and will continue to do so until -// there's a functional network. -// - -/** Specifies the mode under which the server believes it's operating. - - This has implications about how the server processes transactions and - how it responds to requests (e.g. account balance request). - - @note Other code relies on the numerical values of these constants; do - not change them without verifying each use and ensuring that it is - not a breaking change. -*/ -enum class OperatingMode { - DISCONNECTED = 0, //!< not ready to process requests - CONNECTED = 1, //!< convinced we are talking to the network - SYNCING = 2, //!< fallen slightly behind - TRACKING = 3, //!< convinced we agree with the network - FULL = 4 //!< we have the ledger and can even validate -}; - /** Provides server functionality for clients. 
Clients include backend applications, local commands, and connected @@ -221,6 +194,13 @@ public: virtual Json::Value getConsensusInfo() = 0; + + virtual RCLConsensus& + getConsensus() = 0; + + virtual LedgerMaster& + getLedgerMaster() = 0; + virtual Json::Value getServerInfo(bool human, bool admin, bool counters) = 0; virtual void @@ -228,6 +208,9 @@ public: virtual Json::Value getLedgerFetchInfo() = 0; + virtual StateAccounting::CounterData + getStateAccountingData() = 0; + /** Accepts the current transaction tree, return the new ledger's sequence This API is only used via RPC with the server in STANDALONE mode and diff --git a/src/ripple/app/misc/StateAccounting.cpp b/src/ripple/app/misc/StateAccounting.cpp new file mode 100644 index 000000000..ade601a80 --- /dev/null +++ b/src/ripple/app/misc/StateAccounting.cpp @@ -0,0 +1,49 @@ +#include + +namespace ripple { + +void +StateAccounting::mode(OperatingMode om) +{ + std::lock_guard lock(mutex_); + auto now = std::chrono::steady_clock::now(); + + ++counters_[static_cast(om)].transitions; + if (om == OperatingMode::FULL && + counters_[static_cast(om)].transitions == 1) + { + initialSyncUs_ = std::chrono::duration_cast( + now - processStart_) + .count(); + } + counters_[static_cast(mode_)].dur += + std::chrono::duration_cast(now - start_); + + mode_ = om; + start_ = now; +} + +void +StateAccounting::json(Json::Value& obj) +{ + auto [counters, mode, start, initialSync] = getCounterData(); + auto const current = std::chrono::duration_cast( + std::chrono::steady_clock::now() - start); + counters[static_cast(mode)].dur += current; + + obj[jss::state_accounting] = Json::objectValue; + for (std::size_t i = static_cast(OperatingMode::DISCONNECTED); + i <= static_cast(OperatingMode::FULL); + ++i) + { + obj[jss::state_accounting][states_[i]] = Json::objectValue; + auto& state = obj[jss::state_accounting][states_[i]]; + state[jss::transitions] = std::to_string(counters[i].transitions); + state[jss::duration_us] = 
std::to_string(counters[i].dur.count()); + } + obj[jss::server_state_duration_us] = std::to_string(current.count()); + if (initialSync) + obj[jss::initial_sync_duration_us] = std::to_string(initialSync); +} + +} // namespace ripple diff --git a/src/ripple/app/misc/StateAccounting.h b/src/ripple/app/misc/StateAccounting.h new file mode 100644 index 000000000..3cbc5cc73 --- /dev/null +++ b/src/ripple/app/misc/StateAccounting.h @@ -0,0 +1,99 @@ +#ifndef RIPPLE_APP_MAIN_STATEACCOUNTING_H_INCLUDED +#define RIPPLE_APP_MAIN_STATEACCOUNTING_H_INCLUDED + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +// This is the primary interface into the "client" portion of the program. +// Code that wants to do normal operations on the network such as +// creating and monitoring accounts, creating transactions, and so on +// should use this interface. The RPC code will primarily be a light wrapper +// over this code. +// +// Eventually, it will check the node's operating mode (synched, unsynched, +// etectera) and defer to the correct means of processing. The current +// code assumes this node is synched (and will continue to do so until +// there's a functional network. +// + +/** Specifies the mode under which the server believes it's operating. + + This has implications about how the server processes transactions and + how it responds to requests (e.g. account balance request). + + @note Other code relies on the numerical values of these constants; do + not change them without verifying each use and ensuring that it is + not a breaking change. 
+*/ +enum class OperatingMode { + DISCONNECTED = 0, //!< not ready to process requests + CONNECTED = 1, //!< convinced we are talking to the network + SYNCING = 2, //!< fallen slightly behind + TRACKING = 3, //!< convinced we agree with the network + FULL = 4 //!< we have the ledger and can even validate +}; + +class StateAccounting +{ +public: + constexpr static std::array const states_ = { + {Json::StaticString("disconnected"), + Json::StaticString("connected"), + Json::StaticString("syncing"), + Json::StaticString("tracking"), + Json::StaticString("full")}}; + + struct Counters + { + explicit Counters() = default; + + std::uint64_t transitions = 0; + std::chrono::microseconds dur = std::chrono::microseconds(0); + }; + +private: + OperatingMode mode_ = OperatingMode::DISCONNECTED; + std::array counters_; + mutable std::mutex mutex_; + std::chrono::steady_clock::time_point start_ = + std::chrono::steady_clock::now(); + std::chrono::steady_clock::time_point const processStart_ = start_; + std::uint64_t initialSyncUs_{0}; + +public: + explicit StateAccounting() + { + counters_[static_cast(OperatingMode::DISCONNECTED)] + .transitions = 1; + } + + //! Record state transition. Update duration spent in previous state. + void + mode(OperatingMode om); + + //! Output state counters in JSON format. 
+ void + json(Json::Value& obj); + + using CounterData = std::tuple< + decltype(counters_), + decltype(mode_), + decltype(start_), + decltype(initialSyncUs_)>; + + CounterData + getCounterData() + { + return {counters_, mode_, start_, initialSyncUs_}; + } +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 2779547e2..5d9977770 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -155,6 +155,8 @@ public: std::map IMPORT_VL_KEYS; // hex string -> class PublicKey (for caching purposes) + std::string DATAGRAM_MONITOR; + enum StartUpType { FRESH, NORMAL, diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index 27f38bc6e..def5b3c82 100644 --- a/src/ripple/core/ConfigSections.h +++ b/src/ripple/core/ConfigSections.h @@ -101,6 +101,7 @@ struct ConfigSection #define SECTION_SWEEP_INTERVAL "sweep_interval" #define SECTION_NETWORK_ID "network_id" #define SECTION_IMPORT_VL_KEYS "import_vl_keys" +#define SECTION_DATAGRAM_MONITOR "datagram_monitor" } // namespace ripple diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 37315d8f2..656993752 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -281,6 +281,9 @@ Config::setupControl(bool bQuiet, bool bSilent, bool bStandalone) // RAM and CPU resources. We default to "tiny" for standalone mode. if (!bStandalone) { + NODE_SIZE = 4; + return; + // First, check against 'minimum' RAM requirements per node size: auto const& threshold = sizedItems[std::underlying_type_t(SizedItem::ramSizeGB)]; @@ -465,26 +468,24 @@ Config::loadFromString(std::string const& fileContents) SNTP_SERVERS = *s; // if the user has specified ip:port then replace : with a space. 
- { - auto replaceColons = [](std::vector& strVec) { - const static std::regex e(":([0-9]+)$"); - for (auto& line : strVec) - { - // skip anything that might be an ipv6 address - if (std::count(line.begin(), line.end(), ':') != 1) - continue; + auto replaceColons = [](std::vector& strVec) { + const static std::regex e(":([0-9]+)$"); + for (auto& line : strVec) + { + // skip anything that might be an ipv6 address + if (std::count(line.begin(), line.end(), ':') != 1) + continue; - std::string result = std::regex_replace(line, e, " $1"); - // sanity check the result of the replace, should be same length - // as input - if (result.size() == line.size()) - line = result; - } - }; + std::string result = std::regex_replace(line, e, " $1"); + // sanity check the result of the replace, should be same length + // as input + if (result.size() == line.size()) + line = result; + } + }; - replaceColons(IPS_FIXED); - replaceColons(IPS); - } + replaceColons(IPS_FIXED); + replaceColons(IPS); { std::string dbPath; @@ -509,6 +510,13 @@ Config::loadFromString(std::string const& fileContents) NETWORK_ID = beast::lexicalCastThrow(strTemp); } + if (getSingleSection(secConfig, SECTION_DATAGRAM_MONITOR, strTemp, j_)) + { + std::vector vecTemp{strTemp}; + replaceColons(vecTemp); + DATAGRAM_MONITOR = vecTemp[0]; + } + if (getSingleSection(secConfig, SECTION_PEER_PRIVATE, strTemp, j_)) PEER_PRIVATE = beast::lexicalCastThrow(strTemp); From 532a471a356a0ea329adca39ec9f30fe5f740233 Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Wed, 27 Nov 2024 10:16:22 +1100 Subject: [PATCH 06/33] fixReduceImport (#398) Co-authored-by: Denis Angell --- src/ripple/app/tx/impl/Import.cpp | 39 +++++ src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/TER.h | 1 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/TER.cpp | 1 + src/test/app/Import_test.cpp | 216 ++++++++++++++++++++++++++- 6 files changed, 252 insertions(+), 9 deletions(-) diff --git a/src/ripple/app/tx/impl/Import.cpp 
b/src/ripple/app/tx/impl/Import.cpp index d40a80eba..335cbe581 100644 --- a/src/ripple/app/tx/impl/Import.cpp +++ b/src/ripple/app/tx/impl/Import.cpp @@ -889,6 +889,45 @@ Import::preclaim(PreclaimContext const& ctx) } auto const& sle = ctx.view.read(keylet::account(ctx.tx[sfAccount])); + + auto const tt = stpTrans->getTxnType(); + if ((tt == ttSIGNER_LIST_SET || tt == ttREGULAR_KEY_SET) && + ctx.view.rules().enabled(fixReduceImport) && sle) + { + // blackhole check + do + { + // if master key is not set then it is not blackholed + if (!(sle->getFlags() & lsfDisableMaster)) + break; + + // if a regular key is set then it must be acc 0, 1, or 2 otherwise + // not blackholed + if (sle->isFieldPresent(sfRegularKey)) + { + AccountID rk = sle->getAccountID(sfRegularKey); + static const AccountID ACCOUNT_ZERO(0); + static const AccountID ACCOUNT_ONE(1); + static const AccountID ACCOUNT_TWO(2); + + if (rk != ACCOUNT_ZERO && rk != ACCOUNT_ONE && + rk != ACCOUNT_TWO) + break; + } + + // if a signer list is set then it's not blackholed + auto const signerListKeylet = keylet::signers(ctx.tx[sfAccount]); + if (ctx.view.exists(signerListKeylet)) + break; + + // execution to here means it's blackholed + JLOG(ctx.j.warn()) + << "Import: during preclaim target account is blackholed " + << ctx.tx[sfAccount] << ", bailing."; + return tefIMPORT_BLACKHOLED; + } while (0); + } + if (sle && sle->isFieldPresent(sfImportSequence)) { uint32_t sleImportSequence = sle->getFieldU32(sfImportSequence); diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 43d510c63..715f5dac6 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. 
-static constexpr std::size_t numFeatures = 74; +static constexpr std::size_t numFeatures = 75; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -362,6 +362,7 @@ extern uint256 const fix240819; extern uint256 const fixPageCap; extern uint256 const fix240911; extern uint256 const fixFloatDivide; +extern uint256 const fixReduceImport; } // namespace ripple diff --git a/src/ripple/protocol/TER.h b/src/ripple/protocol/TER.h index 42d3cabd3..7cd3cae42 100644 --- a/src/ripple/protocol/TER.h +++ b/src/ripple/protocol/TER.h @@ -184,6 +184,7 @@ enum TEFcodes : TERUnderlyingType { tefPAST_IMPORT_SEQ, tefPAST_IMPORT_VL_SEQ, tefNONDIR_EMIT, + tefIMPORT_BLACKHOLED, }; //------------------------------------------------------------------------------ diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 23cbe236d..1c7fc931b 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -468,6 +468,7 @@ REGISTER_FIX (fix240819, Supported::yes, VoteBehavior::De REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixReduceImport, Supported::yes, VoteBehavior::DefaultYes); // The following amendments are obsolete, but must remain supported // because they could potentially get enabled. 
diff --git a/src/ripple/protocol/impl/TER.cpp b/src/ripple/protocol/impl/TER.cpp index be8be1dd4..e41134a0c 100644 --- a/src/ripple/protocol/impl/TER.cpp +++ b/src/ripple/protocol/impl/TER.cpp @@ -116,6 +116,7 @@ transResults() MAKE_ERROR(tefNO_TICKET, "Ticket is not in ledger."), MAKE_ERROR(tefNFTOKEN_IS_NOT_TRANSFERABLE, "The specified NFToken is not transferable."), MAKE_ERROR(tefNONDIR_EMIT, "An emitted txn was injected into the ledger without a corresponding directory entry."), + MAKE_ERROR(tefIMPORT_BLACKHOLED, "Cannot import keying because target account is blackholed."), MAKE_ERROR(telLOCAL_ERROR, "Local failure."), MAKE_ERROR(telBAD_DOMAIN, "Domain too long."), diff --git a/src/test/app/Import_test.cpp b/src/test/app/Import_test.cpp index c115bcc4d..09ffd4a02 100644 --- a/src/test/app/Import_test.cpp +++ b/src/test/app/Import_test.cpp @@ -79,7 +79,7 @@ class Import_test : public beast::unit_test::suite importVLSequence(jtx::Env const& env, PublicKey const& pk) { auto const sle = env.le(keylet::import_vlseq(pk)); - if (sle->isFieldPresent(sfImportSequence)) + if (sle && sle->isFieldPresent(sfImportSequence)) return (*sle)[sfImportSequence]; return 0; } @@ -2672,6 +2672,134 @@ class Import_test : public beast::unit_test::suite env(import::import(alice, tmpXpop), ter(temMALFORMED)); } + // tefIMPORT_BLACKHOLED - SetRegularKey (w/seed) AccountZero + { + test::jtx::Env env{ + *this, network::makeNetworkVLConfig(21337, keys)}; + auto const feeDrops = env.current()->fees().base; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // Set Regular Key + Json::Value jv; + jv[jss::Account] = alice.human(); + const AccountID ACCOUNT_ZERO(0); + jv["RegularKey"] = to_string(ACCOUNT_ZERO); + jv[jss::TransactionType] = jss::SetRegularKey; + env(jv, alice); + + // Disable Master Key + env(fset(alice, asfDisableMaster), sig(alice)); + env.close(); + + // Import with Master Key + Json::Value tmpXpop = + 
import::loadXpop(ImportTCSetRegularKey::w_seed); + env(import::import(alice, tmpXpop), + ter(tefIMPORT_BLACKHOLED), + fee(feeDrops * 10), + sig(alice)); + env.close(); + } + + // tefIMPORT_BLACKHOLED - SetRegularKey (w/seed) AccountOne + { + test::jtx::Env env{ + *this, network::makeNetworkVLConfig(21337, keys)}; + auto const feeDrops = env.current()->fees().base; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // Set Regular Key + Json::Value jv; + jv[jss::Account] = alice.human(); + const AccountID ACCOUNT_ONE(1); + jv["RegularKey"] = to_string(ACCOUNT_ONE); + jv[jss::TransactionType] = jss::SetRegularKey; + env(jv, alice); + + // Disable Master Key + env(fset(alice, asfDisableMaster), sig(alice)); + env.close(); + + // Import with Master Key + Json::Value tmpXpop = + import::loadXpop(ImportTCSetRegularKey::w_seed); + env(import::import(alice, tmpXpop), + ter(tefIMPORT_BLACKHOLED), + fee(feeDrops * 10), + sig(alice)); + env.close(); + } + + // tefIMPORT_BLACKHOLED - SetRegularKey (w/seed) AccountTwo + { + test::jtx::Env env{ + *this, network::makeNetworkVLConfig(21337, keys)}; + auto const feeDrops = env.current()->fees().base; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // Set Regular Key + Json::Value jv; + jv[jss::Account] = alice.human(); + const AccountID ACCOUNT_TWO(2); + jv["RegularKey"] = to_string(ACCOUNT_TWO); + jv[jss::TransactionType] = jss::SetRegularKey; + env(jv, alice); + + // Disable Master Key + env(fset(alice, asfDisableMaster), sig(alice)); + env.close(); + + // Import with Master Key + Json::Value tmpXpop = + import::loadXpop(ImportTCSetRegularKey::w_seed); + env(import::import(alice, tmpXpop), + ter(tefIMPORT_BLACKHOLED), + fee(feeDrops * 10), + sig(alice)); + env.close(); + } + + // tefIMPORT_BLACKHOLED - SignersListSet (w/seed) + { + test::jtx::Env env{ + *this, network::makeNetworkVLConfig(21337, keys)}; + auto const feeDrops = 
env.current()->fees().base; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // Set Regular Key + Json::Value jv; + jv[jss::Account] = alice.human(); + const AccountID ACCOUNT_ZERO(0); + jv["RegularKey"] = to_string(ACCOUNT_ZERO); + jv[jss::TransactionType] = jss::SetRegularKey; + env(jv, alice); + + // Disable Master Key + env(fset(alice, asfDisableMaster), sig(alice)); + env.close(); + + // Import with Master Key + Json::Value tmpXpop = + import::loadXpop(ImportTCSignersListSet::w_seed); + env(import::import(alice, tmpXpop), + ter(tefIMPORT_BLACKHOLED), + fee(feeDrops * 10), + sig(alice)); + env.close(); + } + // tefPAST_IMPORT_SEQ { test::jtx::Env env{ @@ -4580,14 +4708,22 @@ class Import_test : public beast::unit_test::suite // confirm signers set auto const [signers, signersSle] = signersKeyAndSle(*env.current(), alice); - auto const signerEntries = - signersSle->getFieldArray(sfSignerEntries); - BEAST_EXPECT(signerEntries.size() == 2); - BEAST_EXPECT(signerEntries[0u].getFieldU16(sfSignerWeight) == 1); BEAST_EXPECT( - signerEntries[0u].getAccountID(sfAccount) == carol.id()); - BEAST_EXPECT(signerEntries[1u].getFieldU16(sfSignerWeight) == 1); - BEAST_EXPECT(signerEntries[1u].getAccountID(sfAccount) == bob.id()); + signersSle && signersSle->isFieldPresent(sfSignerEntries)); + if (signersSle && signersSle->isFieldPresent(sfSignerEntries)) + { + auto const signerEntries = + signersSle->getFieldArray(sfSignerEntries); + BEAST_EXPECT(signerEntries.size() == 2); + BEAST_EXPECT( + signerEntries[0u].getFieldU16(sfSignerWeight) == 1); + BEAST_EXPECT( + signerEntries[0u].getAccountID(sfAccount) == carol.id()); + BEAST_EXPECT( + signerEntries[1u].getFieldU16(sfSignerWeight) == 1); + BEAST_EXPECT( + signerEntries[1u].getAccountID(sfAccount) == bob.id()); + } // confirm multisign tx env.close(); @@ -5986,6 +6122,69 @@ class Import_test : public beast::unit_test::suite } } + void + testBlackhole(FeatureBitset features) + { + 
testcase("blackhole"); + + using namespace test::jtx; + using namespace std::literals; + + auto blackholeAccount = [&](Env& env, Account const& acct) { + // Set Regular Key + Json::Value jv; + jv[jss::Account] = acct.human(); + const AccountID ACCOUNT_ZERO(0); + jv["RegularKey"] = to_string(ACCOUNT_ZERO); + jv[jss::TransactionType] = jss::SetRegularKey; + env(jv, acct); + + // Disable Master Key + env(fset(acct, asfDisableMaster), sig(acct)); + env.close(); + }; + + auto burnHeader = [&](Env& env) { + // confirm total coins header + auto const initCoins = env.current()->info().drops; + BEAST_EXPECT(initCoins == 100'000'000'000'000'000); + + // burn 10'000 xrp + auto const master = Account("masterpassphrase"); + env(noop(master), fee(100'000'000'000'000), ter(tesSUCCESS)); + env.close(); + + // confirm total coins header + auto const burnCoins = env.current()->info().drops; + BEAST_EXPECT(burnCoins == initCoins - 100'000'000'000'000); + }; + + // AccountSet (w/seed) + { + test::jtx::Env env{ + *this, network::makeNetworkVLConfig(21337, keys)}; + auto const feeDrops = env.current()->fees().base; + + // Burn Header + burnHeader(env); + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // Blackhole Account + blackholeAccount(env, alice); + + // Import with Master Key + Json::Value tmpXpop = import::loadXpop(ImportTCAccountSet::w_seed); + env(import::import(alice, tmpXpop), + ter(tesSUCCESS), + fee(feeDrops * 10), + sig(alice)); + env.close(); + } + } + public: void run() override @@ -6026,6 +6225,7 @@ public: testMaxSupply(features); testMinMax(features); testHalving(features - featureOwnerPaysFee); + testBlackhole(features); } }; From d878fd4a6e2db2f6e183b417da825428a5d719c6 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Sat, 14 Dec 2024 08:44:40 +1000 Subject: [PATCH 07/33] allow multiple datagram monitor endpoints (#408) --- src/ripple/app/main/Application.cpp | 2 +- src/ripple/app/misc/DatagramMonitor.h | 20 
++++++++++++++++---- src/ripple/core/Config.h | 2 +- src/ripple/core/impl/Config.cpp | 7 +++---- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index be0a7b46a..20427107b 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -1527,7 +1527,7 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) reportingETL_->start(); // Datagram monitor if applicable - if (!config_->standalone() && config_->DATAGRAM_MONITOR != "") + if (!config_->standalone() && !config_->DATAGRAM_MONITOR.empty()) { datagram_monitor_ = std::make_unique(*this); if (datagram_monitor_) diff --git a/src/ripple/app/misc/DatagramMonitor.h b/src/ripple/app/misc/DatagramMonitor.h index ba6ce0213..033090581 100644 --- a/src/ripple/app/misc/DatagramMonitor.h +++ b/src/ripple/app/misc/DatagramMonitor.h @@ -996,15 +996,24 @@ private: void monitorThread() { - auto endpoint = parseEndpoint(app_.config().DATAGRAM_MONITOR); - int sock = createSocket(endpoint); + std::vector> endpoints; + + for (auto const& epStr : app_.config().DATAGRAM_MONITOR) + { + auto endpoint = parseEndpoint(epStr); + endpoints.push_back( + std::make_pair(endpoint, createSocket(endpoint))); + } while (running_) { try { auto info = generateServerInfo(); - sendPacket(sock, endpoint, info); + for (auto const& ep : endpoints) + { + sendPacket(ep.second, ep.first, info); + } std::this_thread::sleep_for(std::chrono::seconds(1)); } catch (const std::exception& e) @@ -1015,7 +1024,10 @@ private: } } - close(sock); + for (auto const& ep : endpoints) + { + close(ep.second); + } } public: diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 5d9977770..3e2c3c81a 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -155,7 +155,7 @@ public: std::map IMPORT_VL_KEYS; // hex string -> class PublicKey (for caching purposes) - std::string DATAGRAM_MONITOR; + std::vector 
DATAGRAM_MONITOR; enum StartUpType { FRESH, diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 656993752..7673d16ec 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -510,11 +510,10 @@ Config::loadFromString(std::string const& fileContents) NETWORK_ID = beast::lexicalCastThrow(strTemp); } - if (getSingleSection(secConfig, SECTION_DATAGRAM_MONITOR, strTemp, j_)) + if (auto s = getIniFileSection(secConfig, SECTION_DATAGRAM_MONITOR)) { - std::vector vecTemp{strTemp}; - replaceColons(vecTemp); - DATAGRAM_MONITOR = vecTemp[0]; + DATAGRAM_MONITOR = *s; + replaceColons(DATAGRAM_MONITOR); } if (getSingleSection(secConfig, SECTION_PEER_PRIVATE, strTemp, j_)) From 85a752235a2e4870fd51cd8dafdefaf9cda78e05 Mon Sep 17 00:00:00 2001 From: tequ Date: Mon, 16 Dec 2024 15:10:01 +0900 Subject: [PATCH 08/33] add URITokenIssuer to account_flags for account_info (#404) --- src/ripple/rpc/handlers/AccountInfo.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index b71fd4865..e811baf58 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -75,7 +75,7 @@ doAccountInfo(RPC::JsonContext& context) auto const accountID{std::move(id.value())}; static constexpr std:: - array, 10> + array, 11> lsFlags{ {{"defaultRipple", lsfDefaultRipple}, {"depositAuth", lsfDepositAuth}, @@ -86,7 +86,8 @@ doAccountInfo(RPC::JsonContext& context) {"passwordSpent", lsfPasswordSpent}, {"requireAuthorization", lsfRequireAuth}, {"tshCollect", lsfTshCollect}, - {"requireDestinationTag", lsfRequireDestTag}}}; + {"requireDestinationTag", lsfRequireDestTag}, + {"uriTokenIssuer", lsfURITokenIssuer}}}; static constexpr std:: array, 5> From 621ca9c86546df7267b38dc5dff0603e02090b5a Mon Sep 17 00:00:00 2001 From: tequ Date: Wed, 22 Jan 2025 07:34:33 +0900 Subject: [PATCH 09/33] Add space to `trace_float` 
log (#424) --- src/ripple/app/hook/impl/applyHook.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/app/hook/impl/applyHook.cpp b/src/ripple/app/hook/impl/applyHook.cpp index abd7ef136..0507aca97 100644 --- a/src/ripple/app/hook/impl/applyHook.cpp +++ b/src/ripple/app/hook/impl/applyHook.cpp @@ -4790,7 +4790,7 @@ DEFINE_HOOK_FUNCTION( if (float1 == 0) { - j.trace() << "HookTrace[" << HC_ACC() << "]:" + j.trace() << "HookTrace[" << HC_ACC() << "]: " << (read_len == 0 ? "" : std::string_view( From 446617523112cd773b01b20209289a8008108b99 Mon Sep 17 00:00:00 2001 From: tequ Date: Wed, 22 Jan 2025 07:38:12 +0900 Subject: [PATCH 10/33] Update boost link for build-full.sh (#421) --- build-full.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-full.sh b/build-full.sh index 4ea18fea0..3ae0251d7 100755 --- a/build-full.sh +++ b/build-full.sh @@ -92,7 +92,7 @@ pwd && tar -xzf cmake-3.23.1-linux-x86_64.tar.gz -C /hbb/ && echo "-- Install Boost 1.86.0 --" && pwd && -( wget -nc -q https://boostorg.jfrog.io/artifactory/main/release/1.86.0/source/boost_1_86_0.tar.gz; echo "" ) && +( wget -nc -q https://archives.boost.io/release/1.86.0/source/boost_1_86_0.tar.gz; echo "" ) && tar -xzf boost_1_86_0.tar.gz && cd boost_1_86_0 && ./bootstrap.sh && ./b2 link=static -j$3 && ./b2 install && cd ../ && From d17f7151ab35e4c7b7fbb18371a427f62b416d74 Mon Sep 17 00:00:00 2001 From: tequ Date: Wed, 22 Jan 2025 12:33:59 +0900 Subject: [PATCH 11/33] Fix HookResult(ExitType) when accept() is not called (#415) --- src/ripple/app/hook/impl/applyHook.cpp | 7 ++++--- src/ripple/app/tx/impl/Transactor.cpp | 24 ++++++++++++++++-------- src/ripple/protocol/Feature.h | 3 ++- src/ripple/protocol/impl/Feature.cpp | 1 + 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/ripple/app/hook/impl/applyHook.cpp b/src/ripple/app/hook/impl/applyHook.cpp index 0507aca97..aeb9f3f50 100644 --- a/src/ripple/app/hook/impl/applyHook.cpp +++ 
b/src/ripple/app/hook/impl/applyHook.cpp @@ -1217,9 +1217,10 @@ hook::apply( .hookParamOverrides = hookParamOverrides, .hookParams = hookParams, .hookSkips = {}, - .exitType = - hook_api::ExitType::ROLLBACK, // default is to rollback unless - // hook calls accept() + .exitType = applyCtx.view().rules().enabled(fixXahauV3) + ? hook_api::ExitType::UNSET + : hook_api::ExitType::ROLLBACK, // default is to rollback + // unless hook calls accept() .exitReason = std::string(""), .exitCode = -1, .hasCallback = hasCallback, diff --git a/src/ripple/app/tx/impl/Transactor.cpp b/src/ripple/app/tx/impl/Transactor.cpp index 7c2734f20..180bf64a8 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -1270,10 +1270,18 @@ Transactor::executeHookChain( if (results.back().exitType == hook_api::ExitType::WASM_ERROR) { JLOG(j_.warn()) << "HookError[" << account << "-" - << ctx_.tx.getAccountID(sfAccount) << "]: " + << ctx_.tx.getAccountID(sfAccount) << "]: Execution failure (graceful) " << "HookHash: " << hookHash; } + if (results.back().exitType == hook_api::ExitType::UNSET) + { + JLOG(j_.warn()) + << "HookError[" << account << "-" + << ctx_.tx.getAccountID(sfAccount) + << "]: Execution failure (no exit type specified) " + << "HookHash: " << hookHash; + } return tecHOOK_REJECTED; } @@ -1298,7 +1306,7 @@ Transactor::executeHookChain( { JLOG(j_.warn()) << "HookError[" << account << "-" - << ctx_.tx.getAccountID(sfAccount) << "]: " + << ctx_.tx.getAccountID(sfAccount) << "]: Execution failure (exceptional) " << "Exception: " << e.what() << " HookHash: " << hookHash; @@ -1426,13 +1434,13 @@ Transactor::doHookCallback( finalizeHookResult(callbackResult, ctx_, success); JLOG(j_.trace()) << "HookInfo[" << callbackAccountID << "-" - << ctx_.tx.getAccountID(sfAccount) << "]: " - << "Callback finalizeHookResult = " << result; + << ctx_.tx.getAccountID(sfAccount) + << "]: Callback finalizeHookResult = " << result; } catch (std::exception& e) { 
JLOG(j_.fatal()) << "HookError[" << callbackAccountID << "-" - << ctx_.tx.getAccountID(sfAccount) << "]: " + << ctx_.tx.getAccountID(sfAccount) << "]: Callback failure " << e.what(); } } @@ -1678,13 +1686,13 @@ Transactor::doAgainAsWeak( results.push_back(aawResult); JLOG(j_.trace()) << "HookInfo[" << hookAccountID << "-" - << ctx_.tx.getAccountID(sfAccount) << "]: " - << " aaw Hook ExitCode = " << aawResult.exitCode; + << ctx_.tx.getAccountID(sfAccount) + << "]: aaw Hook ExitCode = " << aawResult.exitCode; } catch (std::exception& e) { JLOG(j_.fatal()) << "HookError[" << hookAccountID << "-" - << ctx_.tx.getAccountID(sfAccount) << "]: " + << ctx_.tx.getAccountID(sfAccount) << "]: aaw failure " << e.what(); } } diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 715f5dac6..f479ecba7 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 75; +static constexpr std::size_t numFeatures = 76; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -363,6 +363,7 @@ extern uint256 const fixPageCap; extern uint256 const fix240911; extern uint256 const fixFloatDivide; extern uint256 const fixReduceImport; +extern uint256 const fixXahauV3; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 1c7fc931b..12c7b66c8 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -469,6 +469,7 @@ REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::De REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixReduceImport, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixXahauV3, Supported::yes, VoteBehavior::DefaultNo); // The following amendments are obsolete, but must remain supported // because they could potentially get enabled. From 12d8342c34d9500ef60cb56aee5873a0a4401178 Mon Sep 17 00:00:00 2001 From: Wietse Wind Date: Sat, 1 Feb 2025 08:57:25 +0100 Subject: [PATCH 12/33] Update artifact --- .github/workflows/levelization.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/levelization.yml b/.github/workflows/levelization.yml index c8284c5fb..1295bd393 100644 --- a/.github/workflows/levelization.yml +++ b/.github/workflows/levelization.yml @@ -18,8 +18,7 @@ jobs: git diff --exit-code | tee "levelization.patch" - name: Upload patch if: failure() && steps.assert.outcome == 'failure' - uses: actions/upload-artifact@v3 - continue-on-error: true + uses: actions/upload-artifact@v4 with: name: levelization.patch if-no-files-found: ignore From 412593d7bc178957875b9e71435b584bbadb5f36 Mon Sep 17 00:00:00 2001 From: Wietse Wind Date: Sat, 1 Feb 2025 08:57:48 +0100 Subject: [PATCH 13/33] Update artifact --- .github/workflows/clang-format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index 0cc8dbe59..52432adda 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -30,7 +30,7 @@ jobs: git diff --exit-code | tee "clang-format.patch" - name: Upload patch if: failure() && steps.assert.outcome == 'failure' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: clang-format.patch From fa71bda29c0ab1f5d8a0b76734ce886e325445a6 Mon Sep 17 00:00:00 2001 From: Wietse Wind Date: Sat, 1 Feb 2025 08:58:13 +0100 Subject: [PATCH 14/33] Artifact v4 continue on error --- .github/workflows/levelization.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/levelization.yml b/.github/workflows/levelization.yml index 1295bd393..f99c1ca56 100644 --- a/.github/workflows/levelization.yml +++ b/.github/workflows/levelization.yml @@ -19,6 +19,7 @@ jobs: - name: Upload patch if: failure() && steps.assert.outcome == 'failure' uses: actions/upload-artifact@v4 + continue-on-error: true with: name: levelization.patch if-no-files-found: ignore From 2fd465bb3fbfaa821cd80d9f1bf4c4bd9f45ac6f Mon Sep 17 00:00:00 2001 From: RichardAH Date: Mon, 3 Feb 2025 10:33:19 +1000 Subject: [PATCH 15/33] fix20250131 (#428) Co-authored-by: Denis Angell --- src/ripple/app/hook/Guard.h | 33 ++++++++++++++++-- src/ripple/app/hook/guard_checker.cpp | 2 +- src/ripple/app/tx/impl/Change.cpp | 3 +- src/ripple/app/tx/impl/Remit.cpp | 10 ++++++ src/ripple/app/tx/impl/SetHook.cpp | 3 +- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/impl/Feature.cpp | 3 +- src/ripple/protocol/impl/TxMeta.cpp | 4 ++- src/test/app/SetHookTSH_test.cpp | 1 - src/test/app/SetHook_test.cpp | 50 +++++++++++++++++++++++++++ 10 files changed, 103 insertions(+), 9 deletions(-) diff --git a/src/ripple/app/hook/Guard.h b/src/ripple/app/hook/Guard.h index 893fe9282..f395af448 100644 --- a/src/ripple/app/hook/Guard.h +++ 
b/src/ripple/app/hook/Guard.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -271,7 +272,19 @@ check_guard( int guard_func_idx, int last_import_idx, GuardLog guardLog, - std::string guardLogAccStr) + std::string guardLogAccStr, + /* RH NOTE: + * rules version is a bit field, so rule update 1 is 0x01, update 2 is 0x02 + * and update 3 is 0x04 ideally at rule version 3 all bits so far are set + * (0b111) so the ruleVersion = 7, however if a specific rule update must be + * rolled back due to unforeseen behaviour then this may no longer be the + * case. using a bit field here leaves us flexible to rollback changes that + * might have unforeseen consequences, without also rolling back further + * changes that are fine. + */ + uint64_t rulesVersion = 0 + +) { #define MAX_GUARD_CALLS 1024 uint32_t guard_count = 0; @@ -621,11 +634,17 @@ check_guard( } else if (fc_type == 10) // memory.copy { + if (rulesVersion & 0x02U) + GUARD_ERROR("Memory.copy instruction is not allowed."); + REQUIRE(2); ADVANCE(2); } else if (fc_type == 11) // memory.fill { + if (rulesVersion & 0x02U) + GUARD_ERROR("Memory.fill instruction is not allowed."); + ADVANCE(1); } else if (fc_type <= 7) // numeric instructions @@ -807,6 +826,15 @@ validateGuards( std::vector const& wasm, GuardLog guardLog, std::string guardLogAccStr, + /* RH NOTE: + * rules version is a bit field, so rule update 1 is 0x01, update 2 is 0x02 + * and update 3 is 0x04 ideally at rule version 3 all bits so far are set + * (0b111) so the ruleVersion = 7, however if a specific rule update must be + * rolled back due to unforeseen behaviour then this may no longer be the + * case. using a bit field here leaves us flexible to rollback changes that + * might have unforeseen consequences, without also rolling back further + * changes that are fine. 
+ */ uint64_t rulesVersion = 0) { uint64_t byteCount = wasm.size(); @@ -1477,7 +1505,8 @@ validateGuards( guard_import_number, last_import_number, guardLog, - guardLogAccStr); + guardLogAccStr, + rulesVersion); if (!valid) return {}; diff --git a/src/ripple/app/hook/guard_checker.cpp b/src/ripple/app/hook/guard_checker.cpp index 634dd8a93..f20d24617 100644 --- a/src/ripple/app/hook/guard_checker.cpp +++ b/src/ripple/app/hook/guard_checker.cpp @@ -79,7 +79,7 @@ main(int argc, char** argv) close(fd); - auto result = validateGuards(hook, std::cout, "", 1); + auto result = validateGuards(hook, std::cout, "", 3); if (!result) { diff --git a/src/ripple/app/tx/impl/Change.cpp b/src/ripple/app/tx/impl/Change.cpp index c91b79403..61134ca25 100644 --- a/src/ripple/app/tx/impl/Change.cpp +++ b/src/ripple/app/tx/impl/Change.cpp @@ -587,7 +587,8 @@ Change::activateXahauGenesis() wasmBytes, // wasm to verify loggerStream, "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - ctx_.view().rules().enabled(featureHooksUpdate1) ? 1 : 0); + (ctx_.view().rules().enabled(featureHooksUpdate1) ? 1 : 0) + + (ctx_.view().rules().enabled(fix20250131) ? 
2 : 0)); if (!result) { diff --git a/src/ripple/app/tx/impl/Remit.cpp b/src/ripple/app/tx/impl/Remit.cpp index 084513e02..33d8a4c6f 100644 --- a/src/ripple/app/tx/impl/Remit.cpp +++ b/src/ripple/app/tx/impl/Remit.cpp @@ -72,6 +72,16 @@ Remit::preflight(PreflightContext const& ctx) return temREDUNDANT; } + if (ctx.rules.enabled(fix20250131)) + { + if (!dstID || dstID == noAccount()) + { + JLOG(ctx.j.warn()) + << "Malformed transaction: Remit to invalid account."; + return temMALFORMED; + } + } + if (ctx.tx.isFieldPresent(sfInform)) { AccountID const infID = ctx.tx.getAccountID(sfInform); diff --git a/src/ripple/app/tx/impl/SetHook.cpp b/src/ripple/app/tx/impl/SetHook.cpp index aac02753c..67e89d993 100644 --- a/src/ripple/app/tx/impl/SetHook.cpp +++ b/src/ripple/app/tx/impl/SetHook.cpp @@ -479,7 +479,8 @@ SetHook::validateHookSetEntry(SetHookCtx& ctx, STObject const& hookSetObj) hook, // wasm to verify logger, hsacc, - ctx.rules.enabled(featureHooksUpdate1) ? 1 : 0); + (ctx.rules.enabled(featureHooksUpdate1) ? 1 : 0) + + (ctx.rules.enabled(fix20250131) ? 2 : 0)); if (ctx.j.trace()) { diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index f479ecba7..b242b2f7f 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 76; +static constexpr std::size_t numFeatures = 77; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -364,6 +364,7 @@ extern uint256 const fix240911; extern uint256 const fixFloatDivide; extern uint256 const fixReduceImport; extern uint256 const fixXahauV3; +extern uint256 const fix20250131; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 12c7b66c8..73db671ed 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -469,7 +469,8 @@ REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::De REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixReduceImport, Supported::yes, VoteBehavior::DefaultYes); -REGISTER_FIX (fixXahauV3, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixXahauV3, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fix20250131, Supported::yes, VoteBehavior::DefaultYes); // The following amendments are obsolete, but must remain supported // because they could potentially get enabled. 
diff --git a/src/ripple/protocol/impl/TxMeta.cpp b/src/ripple/protocol/impl/TxMeta.cpp index 4b48f5eb4..506c7f2a7 100644 --- a/src/ripple/protocol/impl/TxMeta.cpp +++ b/src/ripple/protocol/impl/TxMeta.cpp @@ -240,7 +240,9 @@ TxMeta::addRaw(Serializer& s, TER result, std::uint32_t index) { mResult = TERtoInt(result); mIndex = index; - assert((mResult == 0) || ((mResult > 100) && (mResult <= 255))); + assert( + (mResult == 0 || mResult == 1) || + ((mResult > 100) && (mResult <= 255))); mNodes.sort([](STObject const& o1, STObject const& o2) { return o1.getFieldH256(sfLedgerIndex) < o2.getFieldH256(sfLedgerIndex); diff --git a/src/test/app/SetHookTSH_test.cpp b/src/test/app/SetHookTSH_test.cpp index 30a561113..9b19c3936 100644 --- a/src/test/app/SetHookTSH_test.cpp +++ b/src/test/app/SetHookTSH_test.cpp @@ -5544,7 +5544,6 @@ public: testTSH(sa - fixXahauV1 - fixXahauV2); testTSH(sa - fixXahauV2); testTSH(sa); - testEmittedTxn(sa - fixXahauV2); testEmittedTxn(sa); } }; diff --git a/src/test/app/SetHook_test.cpp b/src/test/app/SetHook_test.cpp index f0e97a859..7f42e9700 100644 --- a/src/test/app/SetHook_test.cpp +++ b/src/test/app/SetHook_test.cpp @@ -1138,6 +1138,54 @@ public: : preHookCount + 66); } + void + testFillCopy(FeatureBitset features) + { + testcase("Test fill/copy"); + + // a hook containing memory.fill instruction + std::string hookFill = + "0061736d0100000001130360027f7f017f60037f7f7e017e60017f017e02" + "170203656e76025f67000003656e76066163636570740001030201020503" + "0100020621057f01418088040b7f004180080b7f004180080b7f00418088" + "040b7f004180080b07080104686f6f6b00020aa4800001a0800001017e23" + "01412a41e400fc0b004101410110001a41004100420010011a20010b"; + + // a hook containing memory.copy instruction + std::string hookCopy = + "0061736d0100000001130360027f7f017f60037f7f7e017e60017f017e02" + "170203656e76025f67000003656e76066163636570740001030201020503" + "0100020621057f01418088040b7f004180080b7f004180080b7f00418088" + 
"040b7f004180080b07080104686f6f6b00020aa5800001a1800001017e23" + "00230141e400fc0a00004101410110001a41004100420010011a20010b"; + + using namespace jtx; + + for (int withFix = 0; withFix < 2; ++withFix) + { + auto f = withFix ? features : features - fix20250131; + Env env{*this, f}; + + auto const alice = Account{"alice"}; + env.fund(XRP(10000), alice); + + auto const bob = Account{"bob"}; + env.fund(XRP(10000), bob); + + env(ripple::test::jtx::hook(alice, {{hso(hookFill)}}, 0), + M(withFix ? "hookFill - with fix" : "hookFill - zonder fix"), + HSFEE, + withFix ? ter(temMALFORMED) : ter(tesSUCCESS)); + + env(ripple::test::jtx::hook(bob, {{hso(hookCopy)}}, 0), + M(withFix ? "hookCopy - with fix" : "hookCopy - zonder fix"), + HSFEE, + withFix ? ter(temMALFORMED) : ter(tesSUCCESS)); + + env.close(); + } + } + void testCreate(FeatureBitset features) { @@ -11973,6 +12021,8 @@ public: testNSDeletePartial(features); testPageCap(features); + testFillCopy(features); + testWasm(features); test_accept(features); test_rollback(features); From 317bd4bc6e6c216c040cd407c620913322538d65 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Mon, 3 Feb 2025 17:56:08 +1000 Subject: [PATCH 16/33] add strict filtering to account_tx api (#429) --- src/ripple/app/rdb/RelationalDatabase.h | 3 + src/ripple/app/rdb/backend/RWDBDatabase.h | 130 ++++++++++++++++-- .../app/rdb/backend/detail/impl/Node.cpp | 65 +++++++-- src/ripple/basics/strHex.h | 11 ++ src/ripple/rpc/handlers/AccountTx.cpp | 6 +- src/test/rpc/AccountTx_test.cpp | 62 +++++++++ 6 files changed, 247 insertions(+), 30 deletions(-) diff --git a/src/ripple/app/rdb/RelationalDatabase.h b/src/ripple/app/rdb/RelationalDatabase.h index a269bf256..11c592485 100644 --- a/src/ripple/app/rdb/RelationalDatabase.h +++ b/src/ripple/app/rdb/RelationalDatabase.h @@ -69,6 +69,7 @@ public: std::uint32_t offset; std::uint32_t limit; bool bUnlimited; + bool strict; }; struct AccountTxPageOptions @@ -79,6 +80,7 @@ public: std::optional marker; std::uint32_t 
limit; bool bAdmin; + bool strict; }; using AccountTx = @@ -101,6 +103,7 @@ public: bool forward = false; uint32_t limit = 0; std::optional marker; + bool strict; }; struct AccountTxResult diff --git a/src/ripple/app/rdb/backend/RWDBDatabase.h b/src/ripple/app/rdb/backend/RWDBDatabase.h index 9c6d70e7e..2858497a8 100644 --- a/src/ripple/app/rdb/backend/RWDBDatabase.h +++ b/src/ripple/app/rdb/backend/RWDBDatabase.h @@ -43,6 +43,62 @@ private: std::map transactionMap_; std::map accountTxMap_; + // helper function to scan for an account ID inside the tx and meta blobs + // used for strict filtering of account_tx + bool + isAccountInvolvedInTx(AccountID const& account, AccountTx const& accountTx) + { + auto const& txn = accountTx.first; + auto const& meta = accountTx.second; + + // Search metadata, excluding RegularKey false positives + Blob const metaBlob = meta->getAsObject().getSerializer().peekData(); + if (metaBlob.size() >= account.size()) + { + auto it = metaBlob.begin(); + while (true) + { + // Find next occurrence of account + it = std::search( + it, + metaBlob.end(), + account.data(), + account.data() + account.size()); + + if (it == metaBlob.end()) + break; + + // Check if this is a RegularKey field (0x8814 prefix) + if (it >= metaBlob.begin() + 2) + { + auto prefix = *(it - 2); + auto prefix2 = *(it - 1); + if (prefix != 0x88 || prefix2 != 0x14) + { + // Found account not preceded by RegularKey prefix + return true; + } + } + else + { + // Too close to start to be RegularKey + return true; + } + + ++it; // Move past this occurrence + } + } + + // Search transaction blob + Blob const txnBlob = txn->getSTransaction()->getSerializer().peekData(); + return txnBlob.size() >= account.size() && + std::search( + txnBlob.begin(), + txnBlob.end(), + account.data(), + account.data() + account.size()) != txnBlob.end(); + } + public: RWDBDatabase(Application& app, Config const& config, JobQueue& jobQueue) : app_(app), useTxTables_(config.useTxTables()) @@ -193,7 +249,17 
@@ public: std::size_t count = 0; for (const auto& [_, accountData] : accountTxMap_) { - count += accountData.transactions.size(); + for (const auto& tx : accountData.transactions) + { + // RH NOTE: options isn't provided to this function + // but this function is probably only used internally + // so make it reflect the true number (unfiltered) + + // if (options.strict && + // !isAccountInvolvedInTx(options.account, tx)) + // continue; + count++; + } } return count; } @@ -607,12 +673,17 @@ public: { for (const auto& [txSeq, txIndex] : txIt->second) { + AccountTx const accountTx = accountData.transactions[txIndex]; + if (options.strict && + !isAccountInvolvedInTx(options.account, accountTx)) + continue; + if (skipped < options.offset) { ++skipped; continue; } - AccountTx const accountTx = accountData.transactions[txIndex]; + std::uint32_t const inLedger = rangeCheckedCast( accountTx.second->getLgrSeq()); accountTx.first->setStatus(COMMITTED); @@ -652,13 +723,18 @@ public: innerRIt != rIt->second.rend(); ++innerRIt) { + AccountTx const accountTx = + accountData.transactions[innerRIt->second]; + + if (options.strict && + !isAccountInvolvedInTx(options.account, accountTx)) + continue; + if (skipped < options.offset) { ++skipped; continue; } - AccountTx const accountTx = - accountData.transactions[innerRIt->second]; std::uint32_t const inLedger = rangeCheckedCast( accountTx.second->getLgrSeq()); accountTx.first->setLedger(inLedger); @@ -694,12 +770,19 @@ public: { for (const auto& [txSeq, txIndex] : txIt->second) { + AccountTx const accountTx = accountData.transactions[txIndex]; + + if (options.strict && + !isAccountInvolvedInTx(options.account, accountTx)) + continue; + + const auto& [txn, txMeta] = accountTx; + if (skipped < options.offset) { ++skipped; continue; } - const auto& [txn, txMeta] = accountData.transactions[txIndex]; result.emplace_back( txn->getSTransaction()->getSerializer().peekData(), txMeta->getAsObject().getSerializer().peekData(), @@ -738,13 
+821,20 @@ public: innerRIt != rIt->second.rend(); ++innerRIt) { + AccountTx const accountTx = + accountData.transactions[innerRIt->second]; + + if (options.strict && + !isAccountInvolvedInTx(options.account, accountTx)) + continue; + + const auto& [txn, txMeta] = accountTx; + if (skipped < options.offset) { ++skipped; continue; } - const auto& [txn, txMeta] = - accountData.transactions[innerRIt->second]; result.emplace_back( txn->getSTransaction()->getSerializer().peekData(), txMeta->getAsObject().getSerializer().peekData(), @@ -838,18 +928,23 @@ public: return {newmarker, total}; } - Blob rawTxn = accountData.transactions[index] - .first->getSTransaction() + AccountTx const& accountTx = + accountData.transactions[index]; + + Blob rawTxn = accountTx.first->getSTransaction() ->getSerializer() .peekData(); - Blob rawMeta = accountData.transactions[index] - .second->getAsObject() + Blob rawMeta = accountTx.second->getAsObject() .getSerializer() .peekData(); if (rawMeta.size() == 0) onUnsavedLedger(ledgerSeq); + if (options.strict && + !isAccountInvolvedInTx(options.account, accountTx)) + continue; + onTransaction( rangeCheckedCast(ledgerSeq), "COMMITTED", @@ -893,18 +988,23 @@ public: return {newmarker, total}; } - Blob rawTxn = accountData.transactions[index] - .first->getSTransaction() + AccountTx const& accountTx = + accountData.transactions[index]; + + Blob rawTxn = accountTx.first->getSTransaction() ->getSerializer() .peekData(); - Blob rawMeta = accountData.transactions[index] - .second->getAsObject() + Blob rawMeta = accountTx.second->getAsObject() .getSerializer() .peekData(); if (rawMeta.size() == 0) onUnsavedLedger(ledgerSeq); + if (options.strict && + !isAccountInvolvedInTx(options.account, accountTx)) + continue; + onTransaction( rangeCheckedCast(ledgerSeq), "COMMITTED", diff --git a/src/ripple/app/rdb/backend/detail/impl/Node.cpp b/src/ripple/app/rdb/backend/detail/impl/Node.cpp index c80038ef7..8838afff0 100644 --- 
a/src/ripple/app/rdb/backend/detail/impl/Node.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Node.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -758,14 +759,34 @@ transactionsSQL( options.minLedger); } + // Convert account ID to hex string for binary search + std::string accountHex = + strHex(options.account.data(), options.account.size()); + std::string sql; + // For metadata search: + // 1. Look for account ID not preceded by 8814 (RegularKey field) + // 2. OR look for account in raw transaction + std::string filterClause = options.strict ? "AND ((" + "hex(TxnMeta) LIKE '%" + + accountHex + + "%' AND " + "hex(TxnMeta) NOT LIKE '%8814" + + accountHex + + "%'" + ") OR hex(RawTxn) LIKE '%" + + accountHex + "%')" + : ""; + if (count) sql = boost::str( boost::format("SELECT %s FROM AccountTransactions " - "WHERE Account = '%s' %s %s LIMIT %u, %u;") % - selection % toBase58(options.account) % maxClause % minClause % - beast::lexicalCastThrow(options.offset) % + "INNER JOIN Transactions ON Transactions.TransID = " + "AccountTransactions.TransID " + "WHERE Account = '%s' %s %s %s LIMIT %u, %u;") % + selection % toBase58(options.account) % filterClause % maxClause % + minClause % beast::lexicalCastThrow(options.offset) % beast::lexicalCastThrow(numberOfResults)); else sql = boost::str( @@ -773,15 +794,16 @@ transactionsSQL( "SELECT %s FROM " "AccountTransactions INNER JOIN Transactions " "ON Transactions.TransID = AccountTransactions.TransID " - "WHERE Account = '%s' %s %s " + "WHERE Account = '%s' %s %s %s " "ORDER BY AccountTransactions.LedgerSeq %s, " "AccountTransactions.TxnSeq %s, AccountTransactions.TransID %s " "LIMIT %u, %u;") % - selection % toBase58(options.account) % maxClause % minClause % + selection % toBase58(options.account) % filterClause % maxClause % + minClause % (descending ? "DESC" : "ASC") % (descending ? "DESC" : "ASC") % (descending ? "DESC" : "ASC") % - (descending ? 
"DESC" : "ASC") % beast::lexicalCastThrow(options.offset) % beast::lexicalCastThrow(numberOfResults)); + JLOG(j.trace()) << "txSQL query: " << sql; return sql; } @@ -1114,6 +1136,21 @@ accountTxPage( if (limit_used > 0) newmarker = options.marker; + // Convert account ID to hex string for binary search + std::string accountHex = + strHex(options.account.data(), options.account.size()); + + // Add metadata search filter similar to transactionsSQL + std::string filterClause = options.strict + ? " AND ((hex(TxnMeta) LIKE '%" + accountHex + + "%' " + "AND hex(TxnMeta) NOT LIKE '%8814" + + accountHex + + "%') " + "OR hex(RawTxn) LIKE '%" + + accountHex + "%')" + : ""; + static std::string const prefix( R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, Status,RawTxn,TxnMeta @@ -1132,12 +1169,12 @@ accountTxPage( { sql = boost::str( boost::format( - prefix + (R"(AccountTransactions.LedgerSeq BETWEEN %u AND %u + prefix + (R"(AccountTransactions.LedgerSeq BETWEEN %u AND %u %s ORDER BY AccountTransactions.LedgerSeq %s, AccountTransactions.TxnSeq %s LIMIT %u;)")) % toBase58(options.account) % options.minLedger % options.maxLedger % - order % order % queryLimit); + filterClause % order % order % queryLimit); } else { @@ -1150,25 +1187,25 @@ accountTxPage( auto b58acct = toBase58(options.account); sql = boost::str( boost::format(( - R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, - Status,RawTxn,TxnMeta + R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,Status,RawTxn,TxnMeta FROM AccountTransactions, Transactions WHERE (AccountTransactions.TransID = Transactions.TransID AND AccountTransactions.Account = '%s' AND - AccountTransactions.LedgerSeq BETWEEN %u AND %u) + AccountTransactions.LedgerSeq BETWEEN %u AND %u) %s UNION SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,Status,RawTxn,TxnMeta FROM AccountTransactions, Transactions WHERE (AccountTransactions.TransID = Transactions.TransID AND 
AccountTransactions.Account = '%s' AND AccountTransactions.LedgerSeq = %u AND - AccountTransactions.TxnSeq %s %u) + AccountTransactions.TxnSeq %s %u) %s ORDER BY AccountTransactions.LedgerSeq %s, AccountTransactions.TxnSeq %s LIMIT %u; )")) % - b58acct % minLedger % maxLedger % b58acct % findLedger % compare % - findSeq % order % order % queryLimit); + b58acct % minLedger % maxLedger % filterClause % b58acct % + findLedger % compare % findSeq % filterClause % order % order % + queryLimit); } { diff --git a/src/ripple/basics/strHex.h b/src/ripple/basics/strHex.h index 257fb540b..b55ee9e87 100644 --- a/src/ripple/basics/strHex.h +++ b/src/ripple/basics/strHex.h @@ -40,6 +40,17 @@ strHex(FwdIt begin, FwdIt end) return result; } +template +std::string +strHex(FwdIt begin, std::size_t length) +{ + std::string result; + result.reserve(2 * length); + boost::algorithm::hex( + begin, std::next(begin, length), std::back_inserter(result)); + return result; +} + template ().begin())> std::string strHex(T const& from) diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index f65657d92..52389f4e5 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -223,7 +223,8 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) result.ledgerRange.max, result.marker, args.limit, - isUnlimited(context.role)}; + isUnlimited(context.role), + args.strict}; auto const db = dynamic_cast(&context.app.getRelationalDatabase()); @@ -369,6 +370,9 @@ doAccountTxJson(RPC::JsonContext& context) args.forward = params.isMember(jss::forward) && params[jss::forward].asBool(); + args.strict = + params.isMember(jss::strict) ? 
params[jss::strict].asBool() : true; + if (!params.isMember(jss::account)) return rpcError(rpcINVALID_PARAMS); diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 85dd2978d..6a658e5b8 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -245,6 +245,68 @@ class AccountTx_test : public beast::unit_test::suite p[jss::ledger_hash] = to_string(env.closed()->info().parentHash); BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p)))); } + + // Strict + { + Account S1{"S1"}; + Account S2{"S2"}; + Account S3{"S3"}; + env.fund(XRP(10000), S1); + env.fund(XRP(10000), S2); + env.fund(XRP(10000), S3); + env.close(); + + // Regular key set + env(regkey(S1, S2)); + env.close(); + + // we'll make a payment between S1 and S3 + env(pay(S1, S3, XRP(100))); + env.close(); + + auto hasTxs = [](Json::Value const& j, bool strict) { + if (!j.isMember(jss::result) || + j[jss::result][jss::status] != "success") + return false; + + if (strict) + { + return (j[jss::result][jss::transactions].size() == 3) && + (j[jss::result][jss::transactions][0u][jss::tx] + [jss::TransactionType] == jss::SetRegularKey) && + (j[jss::result][jss::transactions][1u][jss::tx] + [jss::TransactionType] == jss::AccountSet) && + (j[jss::result][jss::transactions][2u][jss::tx] + [jss::TransactionType] == jss::Payment); + } + + return (j[jss::result][jss::transactions].size() == 4) && + (j[jss::result][jss::transactions][0u][jss::tx] + [jss::TransactionType] == jss::Payment) && + (j[jss::result][jss::transactions][1u][jss::tx] + [jss::TransactionType] == jss::SetRegularKey) && + (j[jss::result][jss::transactions][2u][jss::tx] + [jss::TransactionType] == jss::AccountSet) && + (j[jss::result][jss::transactions][3u][jss::tx] + [jss::TransactionType] == jss::Payment); + }; + + Json::Value p{jParms}; + p[jss::account] = S2.human(); + + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)), true)); + + p[jss::strict] = true; + + 
BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)), true)); + + p[jss::strict] = false; + + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)), false)); + } } void From da8df63be3ee113a66764e320c95d35a18a3eb41 Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Tue, 4 Feb 2025 17:02:17 +1100 Subject: [PATCH 17/33] debug account tx tests under release builder --- src/test/rpc/AccountTx_test.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 6a658e5b8..ed33f924f 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -269,6 +269,10 @@ class AccountTx_test : public beast::unit_test::suite j[jss::result][jss::status] != "success") return false; + std::cout << "hasTx " << (strict ? "strict" : "not strict") + << ":\n" + << to_string(j) << "\n"; + if (strict) { return (j[jss::result][jss::transactions].size() == 3) && From e0b63ac70e0a19c121f06879b73b69f64239de09 Mon Sep 17 00:00:00 2001 From: Richard Holland Date: Wed, 5 Feb 2025 14:57:35 +1100 Subject: [PATCH 18/33] Revert "debug account tx tests under release builder" This reverts commit da8df63be3ee113a66764e320c95d35a18a3eb41. Revert "add strict filtering to account_tx api (#429)" This reverts commit 317bd4bc6e6c216c040cd407c620913322538d65. 
--- src/ripple/app/rdb/RelationalDatabase.h | 3 - src/ripple/app/rdb/backend/RWDBDatabase.h | 130 ++---------------- .../app/rdb/backend/detail/impl/Node.cpp | 65 ++------- src/ripple/basics/strHex.h | 11 -- src/ripple/rpc/handlers/AccountTx.cpp | 6 +- src/test/rpc/AccountTx_test.cpp | 66 --------- 6 files changed, 30 insertions(+), 251 deletions(-) diff --git a/src/ripple/app/rdb/RelationalDatabase.h b/src/ripple/app/rdb/RelationalDatabase.h index 11c592485..a269bf256 100644 --- a/src/ripple/app/rdb/RelationalDatabase.h +++ b/src/ripple/app/rdb/RelationalDatabase.h @@ -69,7 +69,6 @@ public: std::uint32_t offset; std::uint32_t limit; bool bUnlimited; - bool strict; }; struct AccountTxPageOptions @@ -80,7 +79,6 @@ public: std::optional marker; std::uint32_t limit; bool bAdmin; - bool strict; }; using AccountTx = @@ -103,7 +101,6 @@ public: bool forward = false; uint32_t limit = 0; std::optional marker; - bool strict; }; struct AccountTxResult diff --git a/src/ripple/app/rdb/backend/RWDBDatabase.h b/src/ripple/app/rdb/backend/RWDBDatabase.h index 2858497a8..9c6d70e7e 100644 --- a/src/ripple/app/rdb/backend/RWDBDatabase.h +++ b/src/ripple/app/rdb/backend/RWDBDatabase.h @@ -43,62 +43,6 @@ private: std::map transactionMap_; std::map accountTxMap_; - // helper function to scan for an account ID inside the tx and meta blobs - // used for strict filtering of account_tx - bool - isAccountInvolvedInTx(AccountID const& account, AccountTx const& accountTx) - { - auto const& txn = accountTx.first; - auto const& meta = accountTx.second; - - // Search metadata, excluding RegularKey false positives - Blob const metaBlob = meta->getAsObject().getSerializer().peekData(); - if (metaBlob.size() >= account.size()) - { - auto it = metaBlob.begin(); - while (true) - { - // Find next occurrence of account - it = std::search( - it, - metaBlob.end(), - account.data(), - account.data() + account.size()); - - if (it == metaBlob.end()) - break; - - // Check if this is a RegularKey field 
(0x8814 prefix) - if (it >= metaBlob.begin() + 2) - { - auto prefix = *(it - 2); - auto prefix2 = *(it - 1); - if (prefix != 0x88 || prefix2 != 0x14) - { - // Found account not preceded by RegularKey prefix - return true; - } - } - else - { - // Too close to start to be RegularKey - return true; - } - - ++it; // Move past this occurrence - } - } - - // Search transaction blob - Blob const txnBlob = txn->getSTransaction()->getSerializer().peekData(); - return txnBlob.size() >= account.size() && - std::search( - txnBlob.begin(), - txnBlob.end(), - account.data(), - account.data() + account.size()) != txnBlob.end(); - } - public: RWDBDatabase(Application& app, Config const& config, JobQueue& jobQueue) : app_(app), useTxTables_(config.useTxTables()) @@ -249,17 +193,7 @@ public: std::size_t count = 0; for (const auto& [_, accountData] : accountTxMap_) { - for (const auto& tx : accountData.transactions) - { - // RH NOTE: options isn't provided to this function - // but this function is probably only used internally - // so make it reflect the true number (unfiltered) - - // if (options.strict && - // !isAccountInvolvedInTx(options.account, tx)) - // continue; - count++; - } + count += accountData.transactions.size(); } return count; } @@ -673,17 +607,12 @@ public: { for (const auto& [txSeq, txIndex] : txIt->second) { - AccountTx const accountTx = accountData.transactions[txIndex]; - if (options.strict && - !isAccountInvolvedInTx(options.account, accountTx)) - continue; - if (skipped < options.offset) { ++skipped; continue; } - + AccountTx const accountTx = accountData.transactions[txIndex]; std::uint32_t const inLedger = rangeCheckedCast( accountTx.second->getLgrSeq()); accountTx.first->setStatus(COMMITTED); @@ -723,18 +652,13 @@ public: innerRIt != rIt->second.rend(); ++innerRIt) { - AccountTx const accountTx = - accountData.transactions[innerRIt->second]; - - if (options.strict && - !isAccountInvolvedInTx(options.account, accountTx)) - continue; - if (skipped < 
options.offset) { ++skipped; continue; } + AccountTx const accountTx = + accountData.transactions[innerRIt->second]; std::uint32_t const inLedger = rangeCheckedCast( accountTx.second->getLgrSeq()); accountTx.first->setLedger(inLedger); @@ -770,19 +694,12 @@ public: { for (const auto& [txSeq, txIndex] : txIt->second) { - AccountTx const accountTx = accountData.transactions[txIndex]; - - if (options.strict && - !isAccountInvolvedInTx(options.account, accountTx)) - continue; - - const auto& [txn, txMeta] = accountTx; - if (skipped < options.offset) { ++skipped; continue; } + const auto& [txn, txMeta] = accountData.transactions[txIndex]; result.emplace_back( txn->getSTransaction()->getSerializer().peekData(), txMeta->getAsObject().getSerializer().peekData(), @@ -821,20 +738,13 @@ public: innerRIt != rIt->second.rend(); ++innerRIt) { - AccountTx const accountTx = - accountData.transactions[innerRIt->second]; - - if (options.strict && - !isAccountInvolvedInTx(options.account, accountTx)) - continue; - - const auto& [txn, txMeta] = accountTx; - if (skipped < options.offset) { ++skipped; continue; } + const auto& [txn, txMeta] = + accountData.transactions[innerRIt->second]; result.emplace_back( txn->getSTransaction()->getSerializer().peekData(), txMeta->getAsObject().getSerializer().peekData(), @@ -928,23 +838,18 @@ public: return {newmarker, total}; } - AccountTx const& accountTx = - accountData.transactions[index]; - - Blob rawTxn = accountTx.first->getSTransaction() + Blob rawTxn = accountData.transactions[index] + .first->getSTransaction() ->getSerializer() .peekData(); - Blob rawMeta = accountTx.second->getAsObject() + Blob rawMeta = accountData.transactions[index] + .second->getAsObject() .getSerializer() .peekData(); if (rawMeta.size() == 0) onUnsavedLedger(ledgerSeq); - if (options.strict && - !isAccountInvolvedInTx(options.account, accountTx)) - continue; - onTransaction( rangeCheckedCast(ledgerSeq), "COMMITTED", @@ -988,23 +893,18 @@ public: return {newmarker, 
total}; } - AccountTx const& accountTx = - accountData.transactions[index]; - - Blob rawTxn = accountTx.first->getSTransaction() + Blob rawTxn = accountData.transactions[index] + .first->getSTransaction() ->getSerializer() .peekData(); - Blob rawMeta = accountTx.second->getAsObject() + Blob rawMeta = accountData.transactions[index] + .second->getAsObject() .getSerializer() .peekData(); if (rawMeta.size() == 0) onUnsavedLedger(ledgerSeq); - if (options.strict && - !isAccountInvolvedInTx(options.account, accountTx)) - continue; - onTransaction( rangeCheckedCast(ledgerSeq), "COMMITTED", diff --git a/src/ripple/app/rdb/backend/detail/impl/Node.cpp b/src/ripple/app/rdb/backend/detail/impl/Node.cpp index 8838afff0..c80038ef7 100644 --- a/src/ripple/app/rdb/backend/detail/impl/Node.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Node.cpp @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -759,34 +758,14 @@ transactionsSQL( options.minLedger); } - // Convert account ID to hex string for binary search - std::string accountHex = - strHex(options.account.data(), options.account.size()); - std::string sql; - // For metadata search: - // 1. Look for account ID not preceded by 8814 (RegularKey field) - // 2. OR look for account in raw transaction - std::string filterClause = options.strict ? 
"AND ((" - "hex(TxnMeta) LIKE '%" + - accountHex + - "%' AND " - "hex(TxnMeta) NOT LIKE '%8814" + - accountHex + - "%'" - ") OR hex(RawTxn) LIKE '%" + - accountHex + "%')" - : ""; - if (count) sql = boost::str( boost::format("SELECT %s FROM AccountTransactions " - "INNER JOIN Transactions ON Transactions.TransID = " - "AccountTransactions.TransID " - "WHERE Account = '%s' %s %s %s LIMIT %u, %u;") % - selection % toBase58(options.account) % filterClause % maxClause % - minClause % beast::lexicalCastThrow(options.offset) % + "WHERE Account = '%s' %s %s LIMIT %u, %u;") % + selection % toBase58(options.account) % maxClause % minClause % + beast::lexicalCastThrow(options.offset) % beast::lexicalCastThrow(numberOfResults)); else sql = boost::str( @@ -794,16 +773,15 @@ transactionsSQL( "SELECT %s FROM " "AccountTransactions INNER JOIN Transactions " "ON Transactions.TransID = AccountTransactions.TransID " - "WHERE Account = '%s' %s %s %s " + "WHERE Account = '%s' %s %s " "ORDER BY AccountTransactions.LedgerSeq %s, " "AccountTransactions.TxnSeq %s, AccountTransactions.TransID %s " "LIMIT %u, %u;") % - selection % toBase58(options.account) % filterClause % maxClause % - minClause % (descending ? "DESC" : "ASC") % + selection % toBase58(options.account) % maxClause % minClause % (descending ? "DESC" : "ASC") % (descending ? "DESC" : "ASC") % + (descending ? "DESC" : "ASC") % beast::lexicalCastThrow(options.offset) % beast::lexicalCastThrow(numberOfResults)); - JLOG(j.trace()) << "txSQL query: " << sql; return sql; } @@ -1136,21 +1114,6 @@ accountTxPage( if (limit_used > 0) newmarker = options.marker; - // Convert account ID to hex string for binary search - std::string accountHex = - strHex(options.account.data(), options.account.size()); - - // Add metadata search filter similar to transactionsSQL - std::string filterClause = options.strict - ? 
" AND ((hex(TxnMeta) LIKE '%" + accountHex + - "%' " - "AND hex(TxnMeta) NOT LIKE '%8814" + - accountHex + - "%') " - "OR hex(RawTxn) LIKE '%" + - accountHex + "%')" - : ""; - static std::string const prefix( R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, Status,RawTxn,TxnMeta @@ -1169,12 +1132,12 @@ accountTxPage( { sql = boost::str( boost::format( - prefix + (R"(AccountTransactions.LedgerSeq BETWEEN %u AND %u %s + prefix + (R"(AccountTransactions.LedgerSeq BETWEEN %u AND %u ORDER BY AccountTransactions.LedgerSeq %s, AccountTransactions.TxnSeq %s LIMIT %u;)")) % toBase58(options.account) % options.minLedger % options.maxLedger % - filterClause % order % order % queryLimit); + order % order % queryLimit); } else { @@ -1187,25 +1150,25 @@ accountTxPage( auto b58acct = toBase58(options.account); sql = boost::str( boost::format(( - R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,Status,RawTxn,TxnMeta + R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, + Status,RawTxn,TxnMeta FROM AccountTransactions, Transactions WHERE (AccountTransactions.TransID = Transactions.TransID AND AccountTransactions.Account = '%s' AND - AccountTransactions.LedgerSeq BETWEEN %u AND %u) %s + AccountTransactions.LedgerSeq BETWEEN %u AND %u) UNION SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,Status,RawTxn,TxnMeta FROM AccountTransactions, Transactions WHERE (AccountTransactions.TransID = Transactions.TransID AND AccountTransactions.Account = '%s' AND AccountTransactions.LedgerSeq = %u AND - AccountTransactions.TxnSeq %s %u) %s + AccountTransactions.TxnSeq %s %u) ORDER BY AccountTransactions.LedgerSeq %s, AccountTransactions.TxnSeq %s LIMIT %u; )")) % - b58acct % minLedger % maxLedger % filterClause % b58acct % - findLedger % compare % findSeq % filterClause % order % order % - queryLimit); + b58acct % minLedger % maxLedger % b58acct % findLedger % compare % + findSeq % order % order % queryLimit); } { diff --git 
a/src/ripple/basics/strHex.h b/src/ripple/basics/strHex.h index b55ee9e87..257fb540b 100644 --- a/src/ripple/basics/strHex.h +++ b/src/ripple/basics/strHex.h @@ -40,17 +40,6 @@ strHex(FwdIt begin, FwdIt end) return result; } -template -std::string -strHex(FwdIt begin, std::size_t length) -{ - std::string result; - result.reserve(2 * length); - boost::algorithm::hex( - begin, std::next(begin, length), std::back_inserter(result)); - return result; -} - template ().begin())> std::string strHex(T const& from) diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index 52389f4e5..f65657d92 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -223,8 +223,7 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) result.ledgerRange.max, result.marker, args.limit, - isUnlimited(context.role), - args.strict}; + isUnlimited(context.role)}; auto const db = dynamic_cast(&context.app.getRelationalDatabase()); @@ -370,9 +369,6 @@ doAccountTxJson(RPC::JsonContext& context) args.forward = params.isMember(jss::forward) && params[jss::forward].asBool(); - args.strict = - params.isMember(jss::strict) ? 
params[jss::strict].asBool() : true; - if (!params.isMember(jss::account)) return rpcError(rpcINVALID_PARAMS); diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index ed33f924f..85dd2978d 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -245,72 +245,6 @@ class AccountTx_test : public beast::unit_test::suite p[jss::ledger_hash] = to_string(env.closed()->info().parentHash); BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p)))); } - - // Strict - { - Account S1{"S1"}; - Account S2{"S2"}; - Account S3{"S3"}; - env.fund(XRP(10000), S1); - env.fund(XRP(10000), S2); - env.fund(XRP(10000), S3); - env.close(); - - // Regular key set - env(regkey(S1, S2)); - env.close(); - - // we'll make a payment between S1 and S3 - env(pay(S1, S3, XRP(100))); - env.close(); - - auto hasTxs = [](Json::Value const& j, bool strict) { - if (!j.isMember(jss::result) || - j[jss::result][jss::status] != "success") - return false; - - std::cout << "hasTx " << (strict ? 
"strict" : "not strict") - << ":\n" - << to_string(j) << "\n"; - - if (strict) - { - return (j[jss::result][jss::transactions].size() == 3) && - (j[jss::result][jss::transactions][0u][jss::tx] - [jss::TransactionType] == jss::SetRegularKey) && - (j[jss::result][jss::transactions][1u][jss::tx] - [jss::TransactionType] == jss::AccountSet) && - (j[jss::result][jss::transactions][2u][jss::tx] - [jss::TransactionType] == jss::Payment); - } - - return (j[jss::result][jss::transactions].size() == 4) && - (j[jss::result][jss::transactions][0u][jss::tx] - [jss::TransactionType] == jss::Payment) && - (j[jss::result][jss::transactions][1u][jss::tx] - [jss::TransactionType] == jss::SetRegularKey) && - (j[jss::result][jss::transactions][2u][jss::tx] - [jss::TransactionType] == jss::AccountSet) && - (j[jss::result][jss::transactions][3u][jss::tx] - [jss::TransactionType] == jss::Payment); - }; - - Json::Value p{jParms}; - p[jss::account] = S2.human(); - - BEAST_EXPECT( - hasTxs(env.rpc("json", "account_tx", to_string(p)), true)); - - p[jss::strict] = true; - - BEAST_EXPECT( - hasTxs(env.rpc("json", "account_tx", to_string(p)), true)); - - p[jss::strict] = false; - - BEAST_EXPECT( - hasTxs(env.rpc("json", "account_tx", to_string(p)), false)); - } } void From 1fb1a99ea262eda3661796c234f942033a40c57a Mon Sep 17 00:00:00 2001 From: Wietse Wind Date: Wed, 5 Feb 2025 08:23:49 +0100 Subject: [PATCH 19/33] Update build-in-docker.yml --- .github/workflows/build-in-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-in-docker.yml b/.github/workflows/build-in-docker.yml index 9a9018489..3bc0bc20f 100644 --- a/.github/workflows/build-in-docker.yml +++ b/.github/workflows/build-in-docker.yml @@ -14,7 +14,7 @@ jobs: checkout: runs-on: [self-hosted, vanity] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: clean: false checkpatterns: From 230873f1964dacb71a66e637ae25cf1fed5bbe5b Mon Sep 17 00:00:00 2001 From: Richard 
Holland Date: Thu, 6 Feb 2025 15:21:37 +1100 Subject: [PATCH 20/33] debug gh builds --- release-builder.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/release-builder.sh b/release-builder.sh index f4d14a78a..ec3b063b7 100755 --- a/release-builder.sh +++ b/release-builder.sh @@ -36,7 +36,8 @@ fi STATIC_CONTAINER=$(docker ps -a | grep $CONTAINER_NAME |wc -l) -if [[ "$STATIC_CONTAINER" -gt "0" && "$GITHUB_REPOSITORY" != "" ]]; then +#if [[ "$STATIC_CONTAINER" -gt "0" && "$GITHUB_REPOSITORY" != "" ]]; then +if false; then echo "Static container, execute in static container to have max. cache" docker start $CONTAINER_NAME docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -x /io/build-core.sh "$GITHUB_REPOSITORY" "$GITHUB_SHA" "$BUILD_CORES" "$GITHUB_RUN_NUMBER" From 420240a2ab413c68d486dac4586ce1f7fb4af5ae Mon Sep 17 00:00:00 2001 From: tequ Date: Mon, 24 Feb 2025 16:46:42 +0900 Subject: [PATCH 21/33] Fixed not to use a large fixed range in the magic_enum. (#436) --- src/ripple/rpc/handlers/ServerDefinitions.cpp | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/ripple/rpc/handlers/ServerDefinitions.cpp b/src/ripple/rpc/handlers/ServerDefinitions.cpp index e0260fdcf..e44ef2a94 100644 --- a/src/ripple/rpc/handlers/ServerDefinitions.cpp +++ b/src/ripple/rpc/handlers/ServerDefinitions.cpp @@ -36,12 +36,12 @@ #include #include -#define MAGIC_ENUM(x) \ +#define MAGIC_ENUM(x, _min, _max) \ template <> \ struct magic_enum::customize::enum_range \ { \ - static constexpr int min = -20000; \ - static constexpr int max = 20000; \ + static constexpr int min = _min; \ + static constexpr int max = _max; \ }; #define MAGIC_ENUM_16(x) \ @@ -59,14 +59,14 @@ static constexpr bool is_flags = true; \ }; -MAGIC_ENUM(ripple::SerializedTypeID); -MAGIC_ENUM(ripple::LedgerEntryType); -MAGIC_ENUM(ripple::TELcodes); -MAGIC_ENUM(ripple::TEMcodes); -MAGIC_ENUM(ripple::TEFcodes); -MAGIC_ENUM(ripple::TERcodes); 
-MAGIC_ENUM(ripple::TEScodes); -MAGIC_ENUM(ripple::TECcodes); +MAGIC_ENUM(ripple::SerializedTypeID, -2, 10004); +MAGIC_ENUM(ripple::LedgerEntryType, 0, 255); +MAGIC_ENUM(ripple::TELcodes, -399, 300); +MAGIC_ENUM(ripple::TEMcodes, -299, -200); +MAGIC_ENUM(ripple::TEFcodes, -199, -100); +MAGIC_ENUM(ripple::TERcodes, -99, -1); +MAGIC_ENUM(ripple::TEScodes, 0, 1); +MAGIC_ENUM(ripple::TECcodes, 100, 255); MAGIC_ENUM_16(ripple::TxType); MAGIC_ENUM_FLAG(ripple::UniversalFlags); MAGIC_ENUM_FLAG(ripple::AccountSetFlags); From 8ccff44e8cd009f595e324608b085d84d28aa7d7 Mon Sep 17 00:00:00 2001 From: tequ Date: Mon, 24 Feb 2025 17:16:21 +0900 Subject: [PATCH 22/33] Fix Error handling on build action (#412) --- build-core.sh | 2 ++ build-full.sh | 12 +++++++----- release-builder.sh | 2 ++ 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/build-core.sh b/build-core.sh index 6d95ccea1..d06d029bc 100755 --- a/build-core.sh +++ b/build-core.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -e + echo "START INSIDE CONTAINER - CORE" echo "-- BUILD CORES: $3" diff --git a/build-full.sh b/build-full.sh index 3ae0251d7..14666b652 100755 --- a/build-full.sh +++ b/build-full.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -e + echo "START INSIDE CONTAINER - FULL" echo "-- BUILD CORES: $3" @@ -19,7 +21,7 @@ yum-config-manager --disable centos-sclo-sclo #### cd /io; -mkdir src/certs; +mkdir -p src/certs; curl --silent -k https://raw.githubusercontent.com/RichardAH/rippled-release-builder/main/ca-bundle/certbundle.h -o src/certs/certbundle.h; if [ "`grep certbundle.h src/ripple/net/impl/RegisterSSLCerts.cpp | wc -l`" -eq "0" ] then @@ -66,8 +68,8 @@ then #endif/g" src/ripple/net/impl/RegisterSSLCerts.cpp && sed -i "s/#include /\0\n#include /g" src/ripple/net/impl/RegisterSSLCerts.cpp fi -mkdir .nih_c; -mkdir .nih_toolchain; +mkdir -p .nih_c; +mkdir -p .nih_toolchain; cd .nih_toolchain && yum install -y wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-10-binutils zlib-static 
ncurses-static -y \ devtoolset-7-gcc-c++ \ @@ -115,7 +117,7 @@ tar -xf libunwind-13.0.1.src.tar.xz && cp -r libunwind-13.0.1.src/include libunwind-13.0.1.src/src lld-13.0.1.src/ && cd lld-13.0.1.src && rm -rf build CMakeCache.txt && -mkdir build && +mkdir -p build && cd build && cmake .. -DLLVM_LIBRARY_DIR=/usr/lib64/llvm13/lib/ -DCMAKE_INSTALL_PREFIX=/usr/lib64/llvm13/ -DCMAKE_BUILD_TYPE=Release && make -j$3 install && @@ -125,7 +127,7 @@ cd ../../ && echo "-- Build WasmEdge --" && ( wget -nc -q https://github.com/WasmEdge/WasmEdge/archive/refs/tags/0.11.2.zip; unzip -o 0.11.2.zip; ) && cd WasmEdge-0.11.2 && -( mkdir build; echo "" ) && +( mkdir -p build; echo "" ) && cd build && export BOOST_ROOT="/usr/local/src/boost_1_86_0" && export Boost_LIBRARY_DIRS="/usr/local/lib" && diff --git a/release-builder.sh b/release-builder.sh index ec3b063b7..7c070ac42 100755 --- a/release-builder.sh +++ b/release-builder.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -e + echo "START BUILDING (HOST)" echo "Cleaning previously built binary" From 89cacb12582a71ce1d18fe263a130ce83aeaf3ab Mon Sep 17 00:00:00 2001 From: tequ Date: Mon, 24 Feb 2025 18:33:21 +0900 Subject: [PATCH 23/33] Enhance shell script error handling and debugging on GHA (#447) --- build-core.sh | 7 ++++++- build-full.sh | 7 ++++++- release-builder.sh | 7 ++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/build-core.sh b/build-core.sh index d06d029bc..be73bd348 100755 --- a/build-core.sh +++ b/build-core.sh @@ -1,4 +1,9 @@ -#!/bin/bash +#!/bin/bash -u +# We use set -e and bash with -u to bail on first non zero exit code of any +# processes launched or upon any unbound variable. +# We use set -x to print commands before running them to help with +# debugging. 
+set -ex set -e diff --git a/build-full.sh b/build-full.sh index 14666b652..643adfb13 100755 --- a/build-full.sh +++ b/build-full.sh @@ -1,4 +1,9 @@ -#!/bin/bash +#!/bin/bash -u +# We use set -e and bash with -u to bail on first non zero exit code of any +# processes launched or upon any unbound variable. +# We use set -x to print commands before running them to help with +# debugging. +set -ex set -e diff --git a/release-builder.sh b/release-builder.sh index 7c070ac42..eb3fb6c09 100755 --- a/release-builder.sh +++ b/release-builder.sh @@ -1,4 +1,9 @@ -#!/bin/bash +#!/bin/bash -u +# We use set -e and bash with -u to bail on first non zero exit code of any +# processes launched or upon any unbound variable. +# We use set -x to print commands before running them to help with +# debugging. +set -ex set -e From aeece150962987950ef856983cb2bf9c52942baa Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Mon, 3 Mar 2025 09:55:51 +0100 Subject: [PATCH 24/33] [fix] github runner (#451) Co-authored-by: Niq Dudfield --- build-core.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-core.sh b/build-core.sh index be73bd348..f2a4a2368 100755 --- a/build-core.sh +++ b/build-core.sh @@ -30,7 +30,7 @@ fi perl -i -pe "s/^(\\s*)-DBUILD_SHARED_LIBS=OFF/\\1-DBUILD_SHARED_LIBS=OFF\\n\\1-DROCKSDB_BUILD_SHARED=OFF/g" Builds/CMake/deps/Rocksdb.cmake && mv Builds/CMake/deps/WasmEdge.cmake Builds/CMake/deps/WasmEdge.old && echo "find_package(LLVM REQUIRED CONFIG) -message(STATUS \"Found LLVM ${LLVM_PACKAGE_VERSION}\") +message(STATUS \"Found LLVM \${LLVM_PACKAGE_VERSION}\") message(STATUS \"Using LLVMConfig.cmake in: \${LLVM_DIR}\") add_library (wasmedge STATIC IMPORTED GLOBAL) set_target_properties(wasmedge PROPERTIES IMPORTED_LOCATION \${WasmEdge_LIB}) From 88b01514c19f62ba8de89b0b9d84527f0cb04b59 Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Mon, 3 Mar 2025 19:12:13 +0700 Subject: [PATCH 25/33] fix: remove negative rate test failing on MacOS (#452) --- 
src/test/app/Remit_test.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test/app/Remit_test.cpp b/src/test/app/Remit_test.cpp index ac527c9db..8b615960f 100644 --- a/src/test/app/Remit_test.cpp +++ b/src/test/app/Remit_test.cpp @@ -2102,9 +2102,10 @@ struct Remit_test : public beast::unit_test::suite std::string result; TER code; }; - std::array testCases = {{ + // We test only rates that that can fit in a STI_UINT32. + // Negative rates can't be serdes so there is no need to test them. + std::array testCases = {{ {0.0, USD(100), "900", tesSUCCESS}, - {-1.0, USD(100), "900", temBAD_TRANSFER_RATE}, {0.9, USD(100), "900", temBAD_TRANSFER_RATE}, {1.0, USD(100), "900", tesSUCCESS}, {1.1, USD(100), "890", tesSUCCESS}, From 73858289832648913246eb3f4ee60757793efc04 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Thu, 6 Mar 2025 17:25:42 +1000 Subject: [PATCH 26/33] Touch Amendment (#294) --- Builds/CMake/RippledCore.cmake | 1 + src/ripple/app/tx/impl/Transactor.cpp | 20 + src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/SField.h | 1 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/LedgerFormats.cpp | 1 + src/ripple/protocol/impl/SField.cpp | 1 + src/test/app/Discrepancy_test.cpp | 23 +- src/test/app/Freeze_test.cpp | 27 +- src/test/app/Touch_test.cpp | 1411 ++++++++++++++++++++ src/test/rpc/Subscribe_test.cpp | 205 +++ 11 files changed, 1675 insertions(+), 19 deletions(-) create mode 100644 src/test/app/Touch_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 78843991f..17969c28f 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -755,6 +755,7 @@ if (tests) src/test/app/Taker_test.cpp src/test/app/TheoreticalQuality_test.cpp src/test/app/Ticket_test.cpp + src/test/app/Touch_test.cpp src/test/app/Transaction_ordering_test.cpp src/test/app/TrustAndBalance_test.cpp src/test/app/TxQ_test.cpp diff --git a/src/ripple/app/tx/impl/Transactor.cpp 
b/src/ripple/app/tx/impl/Transactor.cpp index 180bf64a8..621667896 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -1079,6 +1079,24 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) //------------------------------------------------------------------------------ +// increment the touch counter on an account +static void +touchAccount(ApplyView& view, AccountID const& id) +{ + if (!view.rules().enabled(featureTouch)) + return; + + std::shared_ptr sle = view.peek(keylet::account(id)); + if (!sle) + return; + + uint64_t tc = + sle->isFieldPresent(sfTouchCount) ? sle->getFieldU64(sfTouchCount) : 0; + + sle->setFieldU64(sfTouchCount, tc + 1); + view.update(sle); +} + static void removeUnfundedOffers( ApplyView& view, @@ -1519,6 +1537,8 @@ Transactor::doTSH( if ((!canRollback && strong) || (canRollback && !strong)) continue; + touchAccount(view, tshAccountID); + auto klTshHook = keylet::hook(tshAccountID); auto tshHook = view.read(klTshHook); diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index b242b2f7f..3c6e625a9 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 77; +static constexpr std::size_t numFeatures = 78; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -362,6 +362,7 @@ extern uint256 const fix240819; extern uint256 const fixPageCap; extern uint256 const fix240911; extern uint256 const fixFloatDivide; +extern uint256 const featureTouch; extern uint256 const fixReduceImport; extern uint256 const fixXahauV3; extern uint256 const fix20250131; diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index 1f9d15368..0fc34ce09 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -433,6 +433,7 @@ extern SF_UINT64 const sfReferenceCount; extern SF_UINT64 const sfRewardAccumulator; extern SF_UINT64 const sfAccountCount; extern SF_UINT64 const sfAccountIndex; +extern SF_UINT64 const sfTouchCount; // 128-bit extern SF_UINT128 const sfEmailHash; diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 73db671ed..d32950946 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -468,6 +468,7 @@ REGISTER_FIX (fix240819, Supported::yes, VoteBehavior::De REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(Touch, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixReduceImport, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fixXahauV3, Supported::yes, VoteBehavior::DefaultYes); REGISTER_FIX (fix20250131, Supported::yes, VoteBehavior::DefaultYes); diff --git a/src/ripple/protocol/impl/LedgerFormats.cpp b/src/ripple/protocol/impl/LedgerFormats.cpp index acb07c489..4fbcb961a 100644 --- a/src/ripple/protocol/impl/LedgerFormats.cpp +++ b/src/ripple/protocol/impl/LedgerFormats.cpp @@ -66,6 +66,7 @@ LedgerFormats::LedgerFormats() {sfGovernanceFlags, soeOPTIONAL}, {sfGovernanceMarks, soeOPTIONAL}, {sfAccountIndex, soeOPTIONAL}, + {sfTouchCount, 
soeOPTIONAL}, }, commonFields); diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index a72208607..3dee6b3a4 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -183,6 +183,7 @@ CONSTRUCT_TYPED_SFIELD(sfEmitBurden, "EmitBurden", UINT64, CONSTRUCT_TYPED_SFIELD(sfHookInstructionCount, "HookInstructionCount", UINT64, 17); CONSTRUCT_TYPED_SFIELD(sfHookReturnCode, "HookReturnCode", UINT64, 18); CONSTRUCT_TYPED_SFIELD(sfReferenceCount, "ReferenceCount", UINT64, 19); +CONSTRUCT_TYPED_SFIELD(sfTouchCount, "TouchCount", UINT64, 97); CONSTRUCT_TYPED_SFIELD(sfAccountIndex, "AccountIndex", UINT64, 98); CONSTRUCT_TYPED_SFIELD(sfAccountCount, "AccountCount", UINT64, 99); CONSTRUCT_TYPED_SFIELD(sfRewardAccumulator, "RewardAccumulator", UINT64, 100); diff --git a/src/test/app/Discrepancy_test.cpp b/src/test/app/Discrepancy_test.cpp index c89432f91..ba08f4a0c 100644 --- a/src/test/app/Discrepancy_test.cpp +++ b/src/test/app/Discrepancy_test.cpp @@ -42,6 +42,8 @@ class Discrepancy_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this, features}; + bool const withTouch = env.current()->rules().enabled(featureTouch); + Account A1{"A1"}; Account A2{"A2"}; Account A3{"A3"}; @@ -107,7 +109,8 @@ class Discrepancy_test : public beast::unit_test::suite auto meta = jrr[jss::meta]; uint64_t sumPrev{0}; uint64_t sumFinal{0}; - BEAST_EXPECT(meta[sfAffectedNodes.fieldName].size() == 9); + BEAST_EXPECT( + meta[sfAffectedNodes.fieldName].size() == withTouch ? 11 : 10); for (auto const& an : meta[sfAffectedNodes.fieldName]) { Json::Value node; @@ -127,12 +130,17 @@ class Discrepancy_test : public beast::unit_test::suite Json::Value finalFields = node.isMember(sfFinalFields.fieldName) ? 
node[sfFinalFields.fieldName] : node[sfNewFields.fieldName]; - if (prevFields) - sumPrev += beast::lexicalCastThrow( - prevFields[sfBalance.fieldName].asString()); - if (finalFields) - sumFinal += beast::lexicalCastThrow( - finalFields[sfBalance.fieldName].asString()); + + // withTouch: "Touched" account does not update Balance + if (prevFields.isMember(sfBalance.fieldName)) + { + if (prevFields) + sumPrev += beast::lexicalCastThrow( + prevFields[sfBalance.fieldName].asString()); + if (finalFields) + sumFinal += beast::lexicalCastThrow( + finalFields[sfBalance.fieldName].asString()); + } } } // the difference in balances (final and prev) should be the @@ -147,6 +155,7 @@ public: using namespace test::jtx; auto const sa = supported_amendments(); testXRPDiscrepancy(sa - featureFlowCross); + testXRPDiscrepancy(sa - featureTouch); testXRPDiscrepancy(sa); } }; diff --git a/src/test/app/Freeze_test.cpp b/src/test/app/Freeze_test.cpp index 7be1f2569..6402f84c5 100644 --- a/src/test/app/Freeze_test.cpp +++ b/src/test/app/Freeze_test.cpp @@ -60,6 +60,7 @@ class Freeze_test : public beast::unit_test::suite using namespace test::jtx; Env env(*this, features); + bool const withTouch = env.current()->rules().enabled(featureTouch); Account G1{"G1"}; Account alice{"alice"}; @@ -113,7 +114,7 @@ class Freeze_test : public beast::unit_test::suite env(trust(G1, bob["USD"](0), tfSetFreeze)); auto affected = env.meta()->getJson( JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 2u))) + if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u))) return; auto ff = affected[1u][sfModifiedNode.fieldName][sfFinalFields.fieldName]; @@ -131,10 +132,10 @@ class Freeze_test : public beast::unit_test::suite env(offer(bob, G1["USD"](5), XRP(25))); auto affected = env.meta()->getJson( JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 5u))) + if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 
6u : 5u))) return; - auto ff = - affected[3u][sfModifiedNode.fieldName][sfFinalFields.fieldName]; + auto ff = affected[withTouch ? 4u : 3u][sfModifiedNode.fieldName] + [sfFinalFields.fieldName]; BEAST_EXPECT( ff[sfHighLimit.fieldName] == bob["USD"](100).value().getJson(JsonOptions::none)); @@ -199,7 +200,7 @@ class Freeze_test : public beast::unit_test::suite env(trust(G1, bob["USD"](0), tfClearFreeze)); auto affected = env.meta()->getJson( JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 2u))) + if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u))) return; auto ff = affected[1u][sfModifiedNode.fieldName][sfFinalFields.fieldName]; @@ -377,6 +378,7 @@ class Freeze_test : public beast::unit_test::suite using namespace test::jtx; Env env(*this, features); + bool const withTouch = env.current()->rules().enabled(featureTouch); Account G1{"G1"}; Account A1{"A1"}; @@ -417,7 +419,7 @@ class Freeze_test : public beast::unit_test::suite env(trust(G1, A1["USD"](0), tfSetFreeze)); auto affected = env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 1u))) + if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 2u : 1u))) return; auto let = @@ -432,6 +434,7 @@ class Freeze_test : public beast::unit_test::suite using namespace test::jtx; Env env(*this, features); + bool const withTouch = env.current()->rules().enabled(featureTouch); Account G1{"G1"}; Account A2{"A2"}; @@ -475,7 +478,7 @@ class Freeze_test : public beast::unit_test::suite env(trust(G1, A3["USD"](0), tfSetFreeze)); auto affected = env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 2u))) + if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 
3u : 2u))) return; auto ff = affected[1u][sfModifiedNode.fieldName][sfFinalFields.fieldName]; @@ -505,9 +508,10 @@ class Freeze_test : public beast::unit_test::suite env(trust(G1, A4["USD"](0), tfSetFreeze)); affected = env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 2u))) + if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u))) return; - ff = affected[0u][sfModifiedNode.fieldName][sfFinalFields.fieldName]; + ff = affected[withTouch ? 1u : 0u][sfModifiedNode.fieldName] + [sfFinalFields.fieldName]; BEAST_EXPECT( ff[sfLowLimit.fieldName] == G1["USD"](0).value().getJson(JsonOptions::none)); @@ -521,7 +525,7 @@ class Freeze_test : public beast::unit_test::suite env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName]; if (!BEAST_EXPECT(checkArraySize(affected, 8u))) return; - auto created = affected[0u][sfCreatedNode.fieldName]; + auto created = affected[5u][sfCreatedNode.fieldName]; BEAST_EXPECT( created[sfNewFields.fieldName][jss::Account] == A2.human()); env.close(); @@ -543,8 +547,9 @@ public: testOffersWhenFrozen(features); }; using namespace test::jtx; - auto const sa = supported_amendments() - featureXahauGenesis; + auto const sa = supported_amendments(); testAll(sa - featureFlowCross); + testAll(sa - featureTouch); testAll(sa); } }; diff --git a/src/test/app/Touch_test.cpp b/src/test/app/Touch_test.cpp new file mode 100644 index 000000000..2d59a8cd4 --- /dev/null +++ b/src/test/app/Touch_test.cpp @@ -0,0 +1,1411 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2025 XRPL-Labs + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS S + OFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace test { + +struct Touch_test : public beast::unit_test::suite +{ +private: + struct TestLedgerData + { + std::string txType; + std::string result; + }; + + void + validateTouch( + jtx::Env& env, + jtx::Account const& account, + TestLedgerData const& testCase) + { + Json::Value params; + params[jss::account] = account.human(); + params[jss::limit] = 1; + params[jss::ledger_index_min] = -1; + params[jss::ledger_index_max] = -1; + auto const jrr = env.rpc("json", "account_tx", to_string(params)); + auto const transactions = jrr[jss::result][jss::transactions]; + BEAST_EXPECT(transactions.size() == 1); + BEAST_EXPECT( + transactions[0u][jss::tx][jss::TransactionType] == testCase.txType); + BEAST_EXPECT( + transactions[0u][jss::meta][sfTransactionResult.jsonName] == + testCase.result); + } + + void + testAccountSet(FeatureBitset features) + { + using namespace test::jtx; + using namespace std::literals; + testcase("account set"); + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // alice set + env(fset(alice, asfDefaultRipple), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"AccountSet", "tesSUCCESS"}); + } + + void + testAccountDelete(FeatureBitset 
features) + { + using namespace test::jtx; + using namespace std::literals; + testcase("account delete"); + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // AccountDelete + incLgrSeqForAccDel(env, alice); + env(acctdelete(alice, bob), + fee(env.current()->fees().reserve), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"AccountDelete", "tesSUCCESS"}); + validateTouch(env, bob, {"AccountDelete", "tesSUCCESS"}); + } + + static uint256 + getCheckIndex(AccountID const& alice, std::uint32_t uSequence) + { + return keylet::check(alice, uSequence).key; + } + + void + testCheckCancel(FeatureBitset features) + { + testcase("check cancel"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create check + uint256 const checkId{getCheckIndex(alice, env.seq(alice))}; + env(check::create(alice, bob, XRP(100)), ter(tesSUCCESS)); + env.close(); + + // cancel check + env(check::cancel(alice, checkId), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"CheckCancel", "tesSUCCESS"}); + validateTouch(env, bob, {"CheckCancel", "tesSUCCESS"}); + } + + void + testCheckCash(FeatureBitset features) + { + testcase("check cash"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create check + uint256 const checkId{getCheckIndex(alice, env.seq(alice))}; + env(check::create(alice, bob, XRP(100)), ter(tesSUCCESS)); + env.close(); + + // cash check + env(check::cash(bob, checkId, XRP(100)), 
ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"CheckCash", "tesSUCCESS"}); + validateTouch(env, bob, {"CheckCash", "tesSUCCESS"}); + } + + void + testCheckCreate(FeatureBitset features) + { + testcase("check create"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create check + env(check::create(alice, bob, XRP(100)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"CheckCreate", "tesSUCCESS"}); + validateTouch(env, bob, {"CheckCreate", "tesSUCCESS"}); + } + + void + testClaimReward(FeatureBitset features) + { + testcase("claim reward"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const issuer = Account("issuer"); + env.fund(XRP(1000), alice, issuer); + env.close(); + + // claim reward + env(reward::claim(alice), reward::issuer(issuer), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"ClaimReward", "tesSUCCESS"}); + auto const tt = env.current()->rules().enabled(featureTouch) + ? "ClaimReward" + : "AccountSet"; + validateTouch(env, issuer, {tt, "tesSUCCESS"}); + } + + void + testDepositPreauth(FeatureBitset features) + { + testcase("deposit preauth"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // require authorization for deposits. 
+ env(fset(alice, asfDepositAuth)); + + // deposit preauth + env(deposit::auth(alice, bob), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"DepositPreauth", "tesSUCCESS"}); + validateTouch(env, bob, {"DepositPreauth", "tesSUCCESS"}); + } + + void + testEscrowCancel(FeatureBitset features) + { + testcase("escrow cancel"); + + using namespace jtx; + using namespace std::chrono; + using namespace std::literals; + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create escrow + auto const seq1 = env.seq(alice); + NetClock::time_point const finishTime = env.now() + 1s; + NetClock::time_point const cancelTime = env.now() + 2s; + auto createTx = escrow::create(alice, bob, XRP(10)); + createTx[sfFinishAfter.jsonName] = + finishTime.time_since_epoch().count(); + createTx[sfCancelAfter.jsonName] = + cancelTime.time_since_epoch().count(); + env(createTx, ter(tesSUCCESS)); + env.close(); + + // cancel escrow + env(escrow::cancel(alice, alice, seq1), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"EscrowCancel", "tesSUCCESS"}); + validateTouch(env, bob, {"EscrowCancel", "tesSUCCESS"}); + } + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create escrow + auto const seq1 = env.seq(alice); + NetClock::time_point const finishTime = env.now() + 1s; + NetClock::time_point const cancelTime = env.now() + 2s; + auto createTx = escrow::create(alice, bob, XRP(10)); + createTx[sfFinishAfter.jsonName] = + finishTime.time_since_epoch().count(); + createTx[sfCancelAfter.jsonName] = + cancelTime.time_since_epoch().count(); + env(createTx, ter(tesSUCCESS)); + env.close(); + + // cancel escrow + env(escrow::cancel(bob, alice, seq1), ter(tesSUCCESS)); 
+ env.close(); + + // verify touch + validateTouch(env, alice, {"EscrowCancel", "tesSUCCESS"}); + validateTouch(env, bob, {"EscrowCancel", "tesSUCCESS"}); + } + } + + void + testEscrowCreate(FeatureBitset features) + { + testcase("escrow create"); + + using namespace jtx; + using namespace std::chrono; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create escrow + NetClock::time_point const finishTime = env.now() + 1s; + NetClock::time_point const cancelTime = env.now() + 2s; + auto createTx = escrow::create(alice, bob, XRP(10)); + createTx[sfFinishAfter.jsonName] = + finishTime.time_since_epoch().count(); + createTx[sfCancelAfter.jsonName] = + cancelTime.time_since_epoch().count(); + env(createTx, ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"EscrowCreate", "tesSUCCESS"}); + validateTouch(env, bob, {"EscrowCreate", "tesSUCCESS"}); + } + + void + testEscrowFinish(FeatureBitset features) + { + testcase("escrow finish"); + + using namespace jtx; + using namespace std::chrono; + using namespace std::literals; + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create escrow + auto const seq1 = env.seq(alice); + NetClock::time_point const finishTime = env.now() + 1s; + auto createTx = escrow::create(alice, bob, XRP(10)); + createTx[sfFinishAfter.jsonName] = + finishTime.time_since_epoch().count(); + env(createTx, ter(tesSUCCESS)); + env.close(); + + // finish escrow + env(escrow::finish(alice, alice, seq1), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"EscrowFinish", "tesSUCCESS"}); + validateTouch(env, bob, {"EscrowFinish", "tesSUCCESS"}); + } + + { + test::jtx::Env env{*this, envconfig(), 
features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // create escrow + auto const seq1 = env.seq(alice); + NetClock::time_point const finishTime = env.now() + 1s; + auto createTx = escrow::create(alice, bob, XRP(10)); + createTx[sfFinishAfter.jsonName] = + finishTime.time_since_epoch().count(); + env(createTx, ter(tesSUCCESS)); + env.close(); + + // finish escrow + env(escrow::finish(bob, alice, seq1), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"EscrowFinish", "tesSUCCESS"}); + validateTouch(env, bob, {"EscrowFinish", "tesSUCCESS"}); + } + } + + void + testGenesisMint(FeatureBitset features) + { + testcase("genesis mint"); + + using namespace jtx; + using namespace std::chrono; + using namespace std::literals; + + test::jtx::Env env{ + *this, + network::makeNetworkConfig(21337, "10", "1000000", "200000"), + features}; + + auto const alice = Account("alice"); + auto const issuer = env.master; + auto const bene = Account("bob"); + env.fund(XRP(1000), alice, bene); + env.close(); + + // burn down the total ledger coins so that genesis mints don't mint + // above 100B tripping invariant + env(noop(issuer), fee(XRP(10'000'000ULL))); + env.close(); + + // set mint hook on master + env(hook(issuer, {{hso(genesis::MintTestHook, overrideFlag)}}, 0), + fee(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + env(invoke::invoke( + alice, + issuer, + genesis::makeBlob({ + {bene.id(), XRP(123).value(), std::nullopt, std::nullopt}, + })), + fee(XRP(10)), + ter(tesSUCCESS)); + env.close(); + env.close(); + + // verify touch + validateTouch(env, alice, {"Invoke", "tesSUCCESS"}); + validateTouch(env, issuer, {"GenesisMint", "tesSUCCESS"}); + validateTouch(env, bene, {"GenesisMint", "tesSUCCESS"}); + } + + void + testImport(FeatureBitset features) + { + testcase("import"); + + using namespace test::jtx; + using namespace std::literals; + + std::vector const keys 
= { + "ED74D4036C6591A4BDF9C54CEFA39B996A5DCE5F86D11FDA1874481CE9D5A1CDC" + "1"}; + + test::jtx::Env env{ + *this, + network::makeNetworkVLConfig( + 21337, keys, "10", "1000000", "200000"), + features}; + + auto const alice = Account("alice"); + auto const issuer = Account("bob"); + env.fund(XRP(1000), alice, issuer); + env.close(); + + // burn down the total ledger coins so that genesis mints don't mint + // above 100B tripping invariant + env(noop(env.master), fee(XRP(10'000'000ULL))); + env.close(); + + // import + env(import::import(alice, import::loadXpop(ImportTCAccountSet::w_seed)), + import::issuer(issuer), + fee(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Import", "tesSUCCESS"}); + auto const tt = env.current()->rules().enabled(featureTouch) + ? "Import" + : "AccountSet"; + validateTouch(env, issuer, {tt, "tesSUCCESS"}); + } + + void + testInvoke(FeatureBitset features) + { + testcase("invoke"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + env.fund(XRP(1000), alice, bob); + env.close(); + + // ttINVOKE + env(invoke::invoke(alice), invoke::dest(bob), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Invoke", "tesSUCCESS"}); + auto const tt = env.current()->rules().enabled(featureTouch) + ? 
"Invoke" + : "AccountSet"; + validateTouch(env, bob, {tt, "tesSUCCESS"}); + } + + void + testOfferCancel(FeatureBitset features) + { + testcase("offer cancel"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(1000), alice, gw); + env.close(); + + // gw create offer + env(offer(gw, USD(1000), XRP(1000))); + env.close(); + + // create offer + auto const offerSeq = env.seq(alice); + env(offer(alice, USD(1000), XRP(1000)), ter(tesSUCCESS)); + env.close(); + + // cancel offer + env(offer_cancel(alice, offerSeq), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"OfferCancel", "tesSUCCESS"}); + } + + void + testOfferCreate(FeatureBitset features) + { + testcase("offer create"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const gw = Account{"gateway"}; + auto const USD = gw["USD"]; + env.fund(XRP(1000), alice, gw); + env.close(); + + // gw create offer + env(offer(gw, USD(1000), XRP(1000))); + env.close(); + + // create offer + env(offer(alice, USD(1000), XRP(1000)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, gw, {"OfferCreate", "tesSUCCESS"}); + validateTouch(env, alice, {"OfferCreate", "tesSUCCESS"}); + } + + void + testPayment(FeatureBitset features) + { + testcase("payment"); + + using namespace test::jtx; + using namespace std::literals; + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // payment + env(pay(alice, bob, XRP(1)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Payment", "tesSUCCESS"}); + validateTouch(env, bob, 
{"Payment", "tesSUCCESS"}); + } + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + auto const gw = Account{"gw"}; + auto const USD = gw["USD"]; + env.fund(XRP(1000), alice, bob, gw); + env.close(); + env(trust(alice, USD(100)), ter(tesSUCCESS)); + env(trust(bob, USD(100)), ter(tesSUCCESS)); + env.close(); + env(pay(gw, alice, USD(100)), ter(tesSUCCESS)); + env.close(); + + // payment + env(pay(alice, bob, USD(1)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Payment", "tesSUCCESS"}); + validateTouch(env, bob, {"Payment", "tesSUCCESS"}); + validateTouch(env, gw, {"Payment", "tesSUCCESS"}); + } + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const carol = Account("carol"); + env.fund(XRP(1000), alice, bob, carol); + env.close(); + + // setup rippling + auto const USDA = alice["USD"]; + auto const USDB = bob["USD"]; + auto const USDC = carol["USD"]; + env.trust(USDA(10), bob); + env.trust(USDB(10), carol); + + // payment + env(pay(alice, carol, USDB(10)), paths(USDA)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Payment", "tesSUCCESS"}); + validateTouch(env, bob, {"Payment", "tesSUCCESS"}); + validateTouch(env, carol, {"Payment", "tesSUCCESS"}); + } + } + + static uint256 + channel( + jtx::Account const& alice, + jtx::Account const& dst, + std::uint32_t seqProxyValue) + { + auto const k = keylet::payChan(alice, dst, seqProxyValue); + return k.key; + } + + static Buffer + signClaimAuth( + PublicKey const& pk, + SecretKey const& sk, + uint256 const& channel, + STAmount const& authAmt) + { + Serializer msg; + serializePayChanAuthorization(msg, channel, authAmt.xrp()); + return sign(pk, sk, msg.slice()); + } + + void + testPaymentChannelClaim(FeatureBitset features) + { + testcase("payment channel claim"); + + using namespace test::jtx; + 
using namespace std::literals; + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // create paychannel + auto const pk = alice.pk(); + auto const settleDelay = 100s; + auto const chan = channel(alice, bob, env.seq(alice)); + env(paychan::create(alice, bob, XRP(10), settleDelay, pk), + ter(tesSUCCESS)); + env.close(); + + auto const delta = XRP(1); + auto const reqBal = delta; + auto const authAmt = reqBal + XRP(1); + + // claim paychannel + env(paychan::claim(alice, chan, reqBal, authAmt), + txflags(tfClose), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"PaymentChannelClaim", "tesSUCCESS"}); + validateTouch(env, bob, {"PaymentChannelClaim", "tesSUCCESS"}); + } + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // create paychannel + auto const pk = alice.pk(); + auto const settleDelay = 100s; + auto const chan = channel(alice, bob, env.seq(alice)); + env(paychan::create(alice, bob, XRP(10), settleDelay, pk), + ter(tesSUCCESS)); + env.close(); + + auto const delta = XRP(1); + auto const reqBal = delta; + auto const authAmt = reqBal + XRP(1); + + // claim paychannel + auto const sig = + signClaimAuth(alice.pk(), alice.sk(), chan, authAmt); + env(paychan::claim( + bob, chan, reqBal, authAmt, Slice(sig), alice.pk()), + txflags(tfClose), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"PaymentChannelClaim", "tesSUCCESS"}); + validateTouch(env, bob, {"PaymentChannelClaim", "tesSUCCESS"}); + } + } + + void + testPaymentChannelCreate(FeatureBitset features) + { + testcase("payment channel create"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const 
alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // create paychannel + auto const pk = alice.pk(); + auto const settleDelay = 100s; + env(paychan::create(alice, bob, XRP(10), settleDelay, pk), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"PaymentChannelCreate", "tesSUCCESS"}); + validateTouch(env, bob, {"PaymentChannelCreate", "tesSUCCESS"}); + } + + void + testPaymentChannelFund(FeatureBitset features) + { + testcase("payment channel fund"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // create paychannel + auto const pk = alice.pk(); + auto const settleDelay = 100s; + auto const chan = channel(alice, bob, env.seq(alice)); + env(paychan::create(alice, bob, XRP(10), settleDelay, pk), + ter(tesSUCCESS)); + env.close(); + + // fund paychannel + env(paychan::fund(alice, chan, XRP(1)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"PaymentChannelFund", "tesSUCCESS"}); + } + + // helper + void static overrideFlag(Json::Value& jv) + { + jv[jss::Flags] = hsfOVERRIDE; + } + + void + testSetHook(FeatureBitset features) + { + testcase("set hook"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // set tsh hook + auto hook1 = hso(jtx::genesis::AcceptHook, overrideFlag); + hook1[jss::HookOn] = + "00000000000000000000000000000000000000000000000000000000004000" + "00"; + env(hook(alice, {{hook1}}, 0), fee(XRP(1)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"SetHook", "tesSUCCESS"}); + } + + void + testSetRegularKey(FeatureBitset features) + { + 
testcase("set regular key"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // set regular key + env(regkey(alice, bob), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"SetRegularKey", "tesSUCCESS"}); + validateTouch(env, bob, {"SetRegularKey", "tesSUCCESS"}); + } + + void + testSignersListSet(FeatureBitset features) + { + testcase("signers list set"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const signer1 = Account{"bob"}; + auto const signer2 = Account{"carol"}; + env.fund(XRP(1000), alice, signer1, signer2); + env.close(); + + // signers list set + env(signers(alice, 2, {{signer1, 1}, {signer2, 1}}), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"SignerListSet", "tesSUCCESS"}); + auto const tt = env.current()->rules().enabled(featureTouch) + ? 
"SignerListSet" + : "AccountSet"; + validateTouch(env, signer1, {tt, "tesSUCCESS"}); + validateTouch(env, signer2, {tt, "tesSUCCESS"}); + } + + void + testTicketCreate(FeatureBitset features) + { + testcase("ticket create"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + env.fund(XRP(1000), alice); + env.close(); + + // ticket create + env(ticket::create(alice, 2), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"TicketCreate", "tesSUCCESS"}); + } + + void + testTrustSet(FeatureBitset features) + { + testcase("trust set"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const issuer = Account{"gw"}; + auto const USD = issuer["USD"]; + env.fund(XRP(1000), alice, issuer); + env.close(); + + // trust set + env(trust(alice, USD(1000)), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"TrustSet", "tesSUCCESS"}); + validateTouch(env, issuer, {"TrustSet", "tesSUCCESS"}); + } + + void + testURITokenMint(FeatureBitset features) + { + testcase("uritoken mint"); + + using namespace test::jtx; + using namespace std::literals; + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const buyer = Account("carol"); + env.fund(XRP(1000), issuer, buyer); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + // mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, issuer, {"URITokenMint", "tesSUCCESS"}); + } + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const buyer = Account("carol"); + 
env.fund(XRP(1000), issuer, buyer); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + // mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::dest(buyer), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, buyer, {"URITokenMint", "tesSUCCESS"}); + validateTouch(env, issuer, {"URITokenMint", "tesSUCCESS"}); + } + } + + void + testURITokenBurn(FeatureBitset features) + { + testcase("uritoken burn"); + + using namespace test::jtx; + using namespace std::literals; + + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const owner = Account("bob"); + env.fund(XRP(1000), issuer, owner); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + // mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::dest(owner), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(owner, hexid), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // ttURITOKEN_BURN + env(uritoken::burn(owner, hexid), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, owner, {"URITokenBurn", "tesSUCCESS"}); + validateTouch(env, issuer, {"URITokenBurn", "tesSUCCESS"}); + } + + // Issuer + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const owner = Account("bob"); + env.fund(XRP(1000), issuer, owner); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + // mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::dest(owner), + uritoken::amt(XRP(1)), + txflags(tfBurnable), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(owner, hexid), + 
uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // ttURITOKEN_BURN + env(uritoken::burn(issuer, hexid), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, owner, {"URITokenBurn", "tesSUCCESS"}); + validateTouch(env, issuer, {"URITokenBurn", "tesSUCCESS"}); + } + } + + void + testURITokenBuy(FeatureBitset features) + { + testcase("uritoken buy"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const owner = Account("bob"); + auto const buyer = Account("carol"); + env.fund(XRP(1000), issuer, owner, buyer); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + // mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::dest(owner), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(owner, hexid), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // sell uritoken + env(uritoken::sell(owner, hexid), + uritoken::dest(buyer), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(buyer, hexid), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, buyer, {"URITokenBuy", "tesSUCCESS"}); + validateTouch(env, issuer, {"URITokenBuy", "tesSUCCESS"}); + } + + void + testURITokenCancelSellOffer(FeatureBitset features) + { + testcase("uritoken cancel sell offer"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const owner = Account("bob"); + auto const buyer = Account("carol"); + env.fund(XRP(1000), issuer, owner, buyer); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + 
// mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::dest(owner), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(owner, hexid), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // sell uritoken + env(uritoken::sell(owner, hexid), + uritoken::dest(buyer), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // cancel uritoken + env(uritoken::cancel(owner, hexid), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, owner, {"URITokenCancelSellOffer", "tesSUCCESS"}); + validateTouch(env, issuer, {"URITokenCancelSellOffer", "tesSUCCESS"}); + } + + void + testURITokenCreateSellOffer(FeatureBitset features) + { + testcase("uritoken create sell offer"); + + using namespace test::jtx; + using namespace std::literals; + + test::jtx::Env env{*this, envconfig(), features}; + + auto const issuer = Account("alice"); + auto const owner = Account("bob"); + auto const buyer = Account("carol"); + env.fund(XRP(1000), issuer, owner, buyer); + env.close(); + + std::string const uri(2, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + std::string const hexid{strHex(tid)}; + + // mint uritoken + env(uritoken::mint(issuer, uri), + uritoken::dest(owner), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(owner, hexid), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // sell uritoken + env(uritoken::sell(owner, hexid), + uritoken::dest(buyer), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, owner, {"URITokenCreateSellOffer", "tesSUCCESS"}); + validateTouch(env, buyer, {"URITokenCreateSellOffer", "tesSUCCESS"}); + validateTouch(env, issuer, {"URITokenCreateSellOffer", "tesSUCCESS"}); + } + + void + testRemit(FeatureBitset features) + { + testcase("remit"); + + using namespace test::jtx; + using namespace std::literals; + + // No Amount + { + test::jtx::Env 
env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + env.fund(XRP(1000), alice, bob); + env.close(); + + // remit + env(remit::remit(alice, bob), ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Remit", "tesSUCCESS"}); + auto const tt = env.current()->rules().enabled(featureTouch) + ? "Remit" + : "AccountSet"; + validateTouch(env, bob, {tt, "tesSUCCESS"}); + } + + // IOU + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + auto const gw = Account{"gw"}; + auto const USD = gw["USD"]; + env.fund(XRP(1000), alice, bob, gw); + env.close(); + env(trust(alice, USD(100)), ter(tesSUCCESS)); + env(trust(bob, USD(100)), ter(tesSUCCESS)); + env.close(); + env(pay(gw, alice, USD(100)), ter(tesSUCCESS)); + env.close(); + + // remit + env(remit::remit(alice, bob), + remit::amts({USD(1)}), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Remit", "tesSUCCESS"}); + validateTouch(env, bob, {"Remit", "tesSUCCESS"}); + validateTouch(env, gw, {"Remit", "tesSUCCESS"}); + } + + // Inform + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + auto const inform = Account{"inform"}; + env.fund(XRP(1000), alice, bob, inform); + env.close(); + + // remit + env(remit::remit(alice, bob), + remit::inform(inform), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Remit", "tesSUCCESS"}); + auto const tt = env.current()->rules().enabled(featureTouch) + ? 
"Remit" + : "AccountSet"; + validateTouch(env, bob, {tt, "tesSUCCESS"}); + validateTouch(env, inform, {tt, "tesSUCCESS"}); + } + + // URITokenIDs + { + test::jtx::Env env{*this, envconfig(), features}; + + auto const alice = Account("alice"); + auto const bob = Account{"bob"}; + auto const issuer = Account{"issuer"}; + env.fund(XRP(1000), alice, bob, issuer); + env.close(); + + // mint uritoken + std::string const uri(maxTokenURILength, '?'); + auto const tid = uritoken::tokenid(issuer, uri); + env(uritoken::mint(issuer, uri), + txflags(tfBurnable), + ter(tesSUCCESS)); + + // sell uritoken + env(uritoken::sell(issuer, strHex(tid)), + uritoken::amt(XRP(1)), + uritoken::dest(alice), + ter(tesSUCCESS)); + env.close(); + + // buy uritoken + env(uritoken::buy(alice, strHex(tid)), + uritoken::amt(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // remit + env(remit::remit(alice, bob), + remit::token_ids({strHex(tid)}), + ter(tesSUCCESS)); + env.close(); + + // verify touch + validateTouch(env, alice, {"Remit", "tesSUCCESS"}); + validateTouch(env, bob, {"Remit", "tesSUCCESS"}); + validateTouch(env, issuer, {"Remit", "tesSUCCESS"}); + } + } + + void + testAllTxns(FeatureBitset features) + { + testAccountSet(features); + testAccountDelete(features); + testCheckCancel(features); + testCheckCash(features); + testCheckCreate(features); + testClaimReward(features); + testDepositPreauth(features); + testEscrowCancel(features); + testEscrowCreate(features); + testEscrowFinish(features); + testGenesisMint(features); + testImport(features); + testInvoke(features); + testOfferCancel(features); + testOfferCreate(features); + testPayment(features); + testPaymentChannelClaim(features); + testPaymentChannelCreate(features); + testPaymentChannelFund(features); + testSetHook(features); + testSetRegularKey(features); + testSignersListSet(features); + testTicketCreate(features); + testTrustSet(features); + testURITokenMint(features); + testURITokenBurn(features); + testURITokenBuy(features); 
+ testURITokenCancelSellOffer(features); + testURITokenCreateSellOffer(features); + testRemit(features); + } + +public: + void + run() override + { + using namespace test::jtx; + auto const sa = supported_amendments(); + testAllTxns(sa - featureTouch); + testAllTxns(sa); + } +}; + +BEAST_DEFINE_TESTSUITE(Touch, app, ripple); + +} // namespace test +} // namespace ripple \ No newline at end of file diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 7a6f840fd..e359685b8 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -1138,6 +1138,209 @@ public: } } + const std::vector TshHook = { + 0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x28U, + 0x06U, 0x60U, 0x05U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U, 0x7EU, + 0x60U, 0x04U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U, 0x7EU, 0x60U, 0x00U, + 0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x60U, + 0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU, 0x01U, 0x7EU, + 0x02U, 0x45U, 0x05U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x05U, 0x74U, 0x72U, + 0x61U, 0x63U, 0x65U, 0x00U, 0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, + 0x6FU, 0x74U, 0x78U, 0x6EU, 0x5FU, 0x70U, 0x61U, 0x72U, 0x61U, 0x6DU, + 0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x68U, 0x6FU, 0x6FU, + 0x6BU, 0x5FU, 0x61U, 0x67U, 0x61U, 0x69U, 0x6EU, 0x00U, 0x02U, 0x03U, + 0x65U, 0x6EU, 0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U, + 0x00U, 0x03U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U, + 0x04U, 0x03U, 0x02U, 0x01U, 0x05U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U, + 0x06U, 0x2BU, 0x07U, 0x7FU, 0x01U, 0x41U, 0xC0U, 0x8BU, 0x04U, 0x0BU, + 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0xBCU, + 0x0BU, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, + 0x41U, 0xC0U, 0x8BU, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x00U, 0x0BU, + 0x7FU, 0x00U, 0x41U, 0x01U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U, + 0x6FU, 0x6FU, 0x6BU, 0x00U, 0x05U, 0x0AU, 
0x8EU, 0x84U, 0x00U, 0x01U, + 0x8AU, 0x84U, 0x00U, 0x02U, 0x09U, 0x7EU, 0x05U, 0x7FU, 0x02U, 0x40U, + 0x02U, 0x40U, 0x23U, 0x00U, 0x21U, 0x0AU, 0x20U, 0x0AU, 0x41U, 0x10U, + 0x6BU, 0x21U, 0x0AU, 0x20U, 0x0AU, 0x24U, 0x00U, 0x20U, 0x0AU, 0x20U, + 0x00U, 0x36U, 0x02U, 0x0CU, 0x41U, 0x9EU, 0x0BU, 0x41U, 0x0FU, 0x41U, + 0xC1U, 0x09U, 0x41U, 0x0EU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U, 0x02U, + 0x20U, 0x02U, 0x1AU, 0x20U, 0x0AU, 0x41U, 0x0BU, 0x6AU, 0x21U, 0x00U, + 0x20U, 0x00U, 0x41U, 0x01U, 0x41U, 0xBDU, 0x09U, 0x41U, 0x03U, 0x10U, + 0x01U, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x01U, 0x51U, 0x21U, 0x00U, + 0x20U, 0x00U, 0x41U, 0x01U, 0x71U, 0x21U, 0x00U, 0x20U, 0x00U, 0x45U, + 0x21U, 0x00U, 0x20U, 0x00U, 0x45U, 0x21U, 0x00U, 0x0BU, 0x20U, 0x00U, + 0x04U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x10U, 0x02U, 0x21U, 0x03U, + 0x20U, 0x03U, 0x1AU, 0x0BU, 0x01U, 0x0BU, 0x05U, 0x01U, 0x0BU, 0x0BU, + 0x02U, 0x7EU, 0x02U, 0x40U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U, + 0x00U, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, + 0x40U, 0x20U, 0x00U, 0x0EU, 0x03U, 0x02U, 0x01U, 0x00U, 0x04U, 0x0BU, + 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x41U, 0xDBU, 0x09U, 0x41U, + 0xC3U, 0x00U, 0x41U, 0x80U, 0x08U, 0x41U, 0xC2U, 0x00U, 0x41U, 0x00U, + 0x10U, 0x00U, 0x21U, 0x04U, 0x20U, 0x04U, 0x1AU, 0x0BU, 0x0CU, 0x06U, + 0x0BU, 0x00U, 0x0BU, 0x00U, 0x0BU, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, + 0x40U, 0x41U, 0x9FU, 0x0AU, 0x41U, 0x3DU, 0x41U, 0xC2U, 0x08U, 0x41U, + 0x3CU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U, 0x05U, 0x20U, 0x05U, 0x1AU, + 0x0BU, 0x0CU, 0x05U, 0x0BU, 0x00U, 0x0BU, 0x00U, 0x0BU, 0x02U, 0x40U, + 0x02U, 0x40U, 0x02U, 0x40U, 0x41U, 0xDDU, 0x0AU, 0x41U, 0xC0U, 0x00U, + 0x41U, 0xFEU, 0x08U, 0x41U, 0x3FU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U, + 0x06U, 0x20U, 0x06U, 0x1AU, 0x0BU, 0x01U, 0x0BU, 0x0BU, 0x0BU, 0x0BU, + 0x0BU, 0x02U, 0x7EU, 0x02U, 0x7EU, 0x41U, 0xAEU, 0x0BU, 0x41U, 0x0DU, + 0x41U, 0xCFU, 0x09U, 0x41U, 0x0CU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U, + 0x07U, 0x20U, 0x07U, 
0x1AU, 0x20U, 0x0AU, 0x41U, 0x07U, 0x6AU, 0x21U, + 0x0CU, 0x20U, 0x0CU, 0x21U, 0x00U, 0x20U, 0x0AU, 0x20U, 0x00U, 0x36U, + 0x02U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U, 0x00U, 0x20U, + 0x00U, 0xADU, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x18U, 0x88U, 0x21U, + 0x01U, 0x20U, 0x01U, 0x42U, 0xFFU, 0x01U, 0x83U, 0x21U, 0x01U, 0x20U, + 0x01U, 0xA7U, 0x21U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x00U, 0x21U, + 0x0BU, 0x20U, 0x0BU, 0x20U, 0x00U, 0x3AU, 0x00U, 0x00U, 0x20U, 0x0AU, + 0x28U, 0x02U, 0x0CU, 0x21U, 0x00U, 0x20U, 0x00U, 0xADU, 0x21U, 0x01U, + 0x20U, 0x01U, 0x42U, 0x10U, 0x88U, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, + 0xFFU, 0x01U, 0x83U, 0x21U, 0x01U, 0x20U, 0x01U, 0xA7U, 0x21U, 0x00U, + 0x20U, 0x0AU, 0x28U, 0x02U, 0x00U, 0x21U, 0x0BU, 0x20U, 0x0BU, 0x20U, + 0x00U, 0x3AU, 0x00U, 0x01U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U, + 0x00U, 0x20U, 0x00U, 0xADU, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x08U, + 0x88U, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0xFFU, 0x01U, 0x83U, 0x21U, + 0x01U, 0x20U, 0x01U, 0xA7U, 0x21U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, + 0x00U, 0x21U, 0x0BU, 0x20U, 0x0BU, 0x20U, 0x00U, 0x3AU, 0x00U, 0x02U, + 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U, 0x00U, 0x20U, 0x00U, 0xADU, + 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x00U, 0x88U, 0x21U, 0x01U, 0x20U, + 0x01U, 0x42U, 0xFFU, 0x01U, 0x83U, 0x21U, 0x01U, 0x20U, 0x01U, 0xA7U, + 0x21U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x00U, 0x21U, 0x0BU, 0x20U, + 0x0BU, 0x20U, 0x00U, 0x3AU, 0x00U, 0x03U, 0x20U, 0x0CU, 0x21U, 0x00U, + 0x20U, 0x00U, 0x41U, 0x04U, 0x42U, 0x1CU, 0x10U, 0x03U, 0x21U, 0x08U, + 0x20U, 0x08U, 0x1AU, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x04U, 0x21U, + 0x0DU, 0x20U, 0x0DU, 0x1AU, 0x20U, 0x0AU, 0x41U, 0x10U, 0x6AU, 0x21U, + 0x00U, 0x20U, 0x00U, 0x24U, 0x00U, 0x42U, 0x00U, 0x21U, 0x09U, 0x42U, + 0x00U, 0x0BU, 0x0BU, 0x0BU, 0x0BU, 0x0BU, 0xC3U, 0x03U, 0x01U, 0x00U, + 0x41U, 0x80U, 0x08U, 0x0BU, 0xBBU, 0x03U, 0x74U, 0x73U, 0x68U, 0x2EU, + 0x63U, 0x3AU, 0x20U, 0x57U, 0x65U, 0x61U, 0x6BU, 0x20U, 0x41U, 0x67U, + 
0x61U, 0x69U, 0x6EU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U, + 0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, 0x20U, 0x74U, + 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU, + 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U, + 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U, + 0x72U, 0x00U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x57U, + 0x65U, 0x61U, 0x6BU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U, + 0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, 0x20U, 0x74U, + 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU, + 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U, + 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U, + 0x72U, 0x00U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x53U, + 0x74U, 0x72U, 0x6FU, 0x6EU, 0x67U, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, + 0x63U, 0x75U, 0x74U, 0x65U, 0x20U, 0x42U, 0x45U, 0x46U, 0x4FU, 0x52U, + 0x45U, 0x20U, 0x74U, 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, + 0x69U, 0x6FU, 0x6EU, 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, + 0x6CU, 0x69U, 0x65U, 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, + 0x64U, 0x67U, 0x65U, 0x72U, 0x00U, 0x41U, 0x41U, 0x57U, 0x00U, 0x74U, + 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x53U, 0x74U, 0x61U, 0x72U, + 0x74U, 0x2EU, 0x00U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, + 0x45U, 0x6EU, 0x64U, 0x2EU, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU, + 0x63U, 0x3AU, 0x20U, 0x57U, 0x65U, 0x61U, 0x6BU, 0x20U, 0x41U, 0x67U, + 0x61U, 0x69U, 0x6EU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U, + 0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, 0x20U, 0x74U, + 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU, + 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U, + 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U, + 0x72U, 0x22U, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU, 
0x63U, 0x3AU, + 0x20U, 0x57U, 0x65U, 0x61U, 0x6BU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, + 0x63U, 0x75U, 0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, + 0x20U, 0x74U, 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, + 0x6FU, 0x6EU, 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, + 0x69U, 0x65U, 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, + 0x67U, 0x65U, 0x72U, 0x22U, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU, + 0x63U, 0x3AU, 0x20U, 0x53U, 0x74U, 0x72U, 0x6FU, 0x6EU, 0x67U, 0x2EU, + 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U, 0x74U, 0x65U, 0x20U, 0x42U, + 0x45U, 0x46U, 0x4FU, 0x52U, 0x45U, 0x20U, 0x74U, 0x72U, 0x61U, 0x6EU, + 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU, 0x20U, 0x69U, 0x73U, + 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U, 0x64U, 0x20U, 0x74U, + 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U, 0x72U, 0x22U, 0x00U, + 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x53U, 0x74U, + 0x61U, 0x72U, 0x74U, 0x2EU, 0x22U, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, + 0x2EU, 0x63U, 0x3AU, 0x20U, 0x45U, 0x6EU, 0x64U, 0x2EU, 0x22U}; + + void static overrideFlag(Json::Value& jv) + { + jv[jss::Flags] = 0b00000001U; + } + + void + setTSHHook(jtx::Env& env, jtx::Account const& account) + { + using namespace test::jtx; + env(hook(account, {{hso(TshHook, overrideFlag)}}, 0), + fee(XRP(2)), + ter(tesSUCCESS)); + env.close(); + } + + void + testAccount(FeatureBitset features) + { + testcase("AccountWithHookStream"); + + using namespace std::chrono_literals; + using namespace jtx; + Env env(*this, features); + + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const gw = Account("gw"); + auto const USD = gw["USD"]; + + env.fund(XRP(10000), alice, bob, gw); + env.trust(USD(20000), alice, bob); + env.close(); + + auto wsc = makeWSClient(env.app().config()); + Json::Value stream; + + bool const withTouch = env.current()->rules().enabled(featureTouch); + { + // RPC subscribe to account stream + 
stream[jss::accounts] = Json::arrayValue; + stream[jss::accounts].append(bob.human()); + auto jv = wsc->invoke("subscribe", stream); + if (wsc->version() == 2) + { + BEAST_EXPECT( + jv.isMember(jss::jsonrpc) && jv[jss::jsonrpc] == "2.0"); + BEAST_EXPECT( + jv.isMember(jss::ripplerpc) && jv[jss::ripplerpc] == "2.0"); + BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5); + } + BEAST_EXPECT(jv[jss::result][jss::status] == "success"); + } + + // Test Invoke Tx + { + setTSHHook(env, bob); + // Submit and Close + env(invoke::invoke(alice), + invoke::dest(bob), + fee(XRP(1)), + ter(tesSUCCESS)); + env.close(); + + // Check stream update + BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { + if (jv[jss::transaction][jss::TransactionType] == "Invoke") + return true; + return withTouch ? false : true; + })); + } + + // RPC unsubscribe + auto jv = wsc->invoke("unsubscribe", stream); + if (wsc->version() == 2) + { + BEAST_EXPECT( + jv.isMember(jss::jsonrpc) && jv[jss::jsonrpc] == "2.0"); + BEAST_EXPECT( + jv.isMember(jss::ripplerpc) && jv[jss::ripplerpc] == "2.0"); + BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5); + } + BEAST_EXPECT(jv[jss::status] == "success"); + } + void run() override { @@ -1155,6 +1358,8 @@ public: testSubErrors(false); testSubByUrl(); testHistoryTxStream(); + testAccount(all); + testAccount(all - featureTouch); } }; From ef77b02d7f2f60a59c5987ea3c99b8a3378c5065 Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Tue, 11 Mar 2025 19:19:28 +0700 Subject: [PATCH 27/33] CI Release Builder (#455) --- .github/workflows/build-in-docker.yml | 97 ++++++++++++++++++++++----- docker-unit-tests.sh | 2 +- release-builder.sh | 25 ++++++- 3 files changed, 105 insertions(+), 19 deletions(-) diff --git a/.github/workflows/build-in-docker.yml b/.github/workflows/build-in-docker.yml index 3bc0bc20f..9d959bbc9 100644 --- a/.github/workflows/build-in-docker.yml +++ b/.github/workflows/build-in-docker.yml @@ -2,37 +2,104 @@ name: Build using Docker on: push: - 
branches: [ "dev", "candidate", "release", "jshooks" ] + branches: ["dev", "candidate", "release", "jshooks"] pull_request: - branches: [ "dev", "candidate", "release", "jshooks" ] + branches: ["dev", "candidate", "release", "jshooks"] concurrency: - group: ${{ github.workflow }} - cancel-in-progress: false + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP: 1 jobs: checkout: runs-on: [self-hosted, vanity] + outputs: + checkout_path: ${{ steps.vars.outputs.checkout_path }} steps: - - uses: actions/checkout@v4 - with: - clean: false + - name: Prepare checkout path + id: vars + run: | + SAFE_BRANCH=$(echo "${{ github.ref_name }}" | sed -e 's/[^a-zA-Z0-9._-]/-/g') + CHECKOUT_PATH="${SAFE_BRANCH}-${{ github.sha }}" + echo "checkout_path=${CHECKOUT_PATH}" >> "$GITHUB_OUTPUT" + + - uses: actions/checkout@v4 + with: + path: ${{ steps.vars.outputs.checkout_path }} + clean: true + fetch-depth: 2 # Only get the last 2 commits, to avoid fetching all history + checkpatterns: runs-on: [self-hosted, vanity] needs: checkout + defaults: + run: + working-directory: ${{ needs.checkout.outputs.checkout_path }} steps: - - name: Check for suspicious patterns - run: /bin/bash suspicious_patterns.sh + - name: Check for suspicious patterns + run: /bin/bash suspicious_patterns.sh + build: runs-on: [self-hosted, vanity] - needs: checkpatterns + needs: [checkpatterns, checkout] + defaults: + run: + working-directory: ${{ needs.checkout.outputs.checkout_path }} steps: - - name: Build using Docker - run: /bin/bash release-builder.sh + - name: Set Cleanup Script Path + run: | + echo "JOB_CLEANUP_SCRIPT=$(mktemp)" >> $GITHUB_ENV + + - name: Build using Docker + run: /bin/bash release-builder.sh + + - name: Stop Container (Cleanup) + if: always() + run: | + echo "Running cleanup script: $JOB_CLEANUP_SCRIPT" + /bin/bash -e -x "$JOB_CLEANUP_SCRIPT" + CLEANUP_EXIT_CODE=$? 
+ + if [[ "$CLEANUP_EXIT_CODE" -eq 0 ]]; then + echo "Cleanup script succeeded." + rm -f "$JOB_CLEANUP_SCRIPT" + echo "Cleanup script removed." + else + echo "⚠️ Cleanup script failed! Keeping for debugging: $JOB_CLEANUP_SCRIPT" + fi + + if [[ "${DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP}" == "1" ]]; then + echo "🔍 Checking for leftover containers..." + BUILD_CONTAINERS=$(docker ps --format '{{.Names}}' | grep '^xahaud_cached_builder' || echo "") + + if [[ -n "$BUILD_CONTAINERS" ]]; then + echo "⚠️ WARNING: Some build containers are still running" + echo "$BUILD_CONTAINERS" + else + echo "✅ No build containers found" + fi + fi + tests: runs-on: [self-hosted, vanity] - needs: build + needs: [build, checkout] + defaults: + run: + working-directory: ${{ needs.checkout.outputs.checkout_path }} steps: - - name: Unit tests - run: /bin/bash docker-unit-tests.sh + - name: Unit tests + run: /bin/bash docker-unit-tests.sh + cleanup: + runs-on: [self-hosted, vanity] + needs: [tests, checkout] + if: always() + steps: + - name: Cleanup workspace + run: | + CHECKOUT_PATH="${{ needs.checkout.outputs.checkout_path }}" + echo "Cleaning workspace for ${CHECKOUT_PATH}" + rm -rf "${{ github.workspace }}/${CHECKOUT_PATH}" diff --git a/docker-unit-tests.sh b/docker-unit-tests.sh index 544d0eecb..6bfde48c0 100644 --- a/docker-unit-tests.sh +++ b/docker-unit-tests.sh @@ -1,4 +1,4 @@ #!/bin/bash +echo "Mounting $(pwd)/io in ubuntu and running unit tests" docker run --rm -i -v $(pwd):/io ubuntu sh -c '/io/release-build/xahaud -u' - diff --git a/release-builder.sh b/release-builder.sh index eb3fb6c09..2fa86d933 100755 --- a/release-builder.sh +++ b/release-builder.sh @@ -5,8 +5,6 @@ # debugging. 
set -ex -set -e - echo "START BUILDING (HOST)" echo "Cleaning previously built binary" @@ -19,7 +17,26 @@ if [[ "$GITHUB_REPOSITORY" == "" ]]; then BUILD_CORES=8 fi -CONTAINER_NAME=xahaud_cached_builder_$(echo "$GITHUB_ACTOR" | awk '{print tolower($0)}') +EXIT_IF_CONTAINER_RUNNING=${EXIT_IF_CONTAINER_RUNNING:-1} +# Ensure still works outside of GH Actions by setting these to /dev/null +# GA will run this script and then delete it at the end of the job +JOB_CLEANUP_SCRIPT=${JOB_CLEANUP_SCRIPT:-/dev/null} +NORMALIZED_WORKFLOW=$(echo "$GITHUB_WORKFLOW" | tr -c 'a-zA-Z0-9' '-') +NORMALIZED_REF=$(echo "$GITHUB_REF" | tr -c 'a-zA-Z0-9' '-') +CONTAINER_NAME="xahaud_cached_builder_${NORMALIZED_WORKFLOW}-${NORMALIZED_REF}" + +# Check if the container is already running +if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then + echo "⚠️ A running container (${CONTAINER_NAME}) was detected." + + if [[ "$EXIT_IF_CONTAINER_RUNNING" -eq 1 ]]; then + echo "❌ EXIT_IF_CONTAINER_RUNNING is set. Exiting." 
+ exit 1 + else + echo "🛑 Stopping the running container: ${CONTAINER_NAME}" + docker stop "${CONTAINER_NAME}" + fi +fi echo "-- BUILD CORES: $BUILD_CORES" echo "-- GITHUB_REPOSITORY: $GITHUB_REPOSITORY" @@ -62,6 +79,8 @@ else # GH Action, runner echo "GH Action, runner, clean & re-create create persistent container" docker rm -f $CONTAINER_NAME + echo "echo 'Stopping container: $CONTAINER_NAME'" >> "$JOB_CLEANUP_SCRIPT" + echo "docker stop --time=15 \"$CONTAINER_NAME\" || echo 'Failed to stop container or container not running'" >> "$JOB_CLEANUP_SCRIPT" docker run -di --user 0:$(id -g) --name $CONTAINER_NAME -v /data/builds:/data/builds -v `pwd`:/io --network host ghcr.io/foobarwidget/holy-build-box-x64 /hbb_exe/activate-exec bash docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -x /io/build-full.sh "$GITHUB_REPOSITORY" "$GITHUB_SHA" "$BUILD_CORES" "$GITHUB_RUN_NUMBER" docker stop $CONTAINER_NAME From d088ad61a90410229c46dd37d309dbdc8d8fed1d Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Tue, 18 Mar 2025 15:37:18 +0700 Subject: [PATCH 28/33] Prevent dangling reference in getHash() (#475) Replace temporary uint256 with static variable when returning fallback hash to avoid returning a const reference to a local temporary object. 
--- src/ripple/rpc/handlers/ServerDefinitions.cpp | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/ripple/rpc/handlers/ServerDefinitions.cpp b/src/ripple/rpc/handlers/ServerDefinitions.cpp index e44ef2a94..e0a7d9c13 100644 --- a/src/ripple/rpc/handlers/ServerDefinitions.cpp +++ b/src/ripple/rpc/handlers/ServerDefinitions.cpp @@ -462,15 +462,9 @@ public: uint256 const& getHash() const { - if (!defsHash) - { - // should be unreachable - // if this does happen we don't want 0 xor 0 so use a random value - // here - return uint256( - "DF4220E93ADC6F5569063A01B4DC79F8DB9553B6A3222ADE23DEA0"); - } - return *defsHash; + static const uint256 fallbackHash( + "DF4220E93ADC6F5569063A01B4DC79F8DB9553B6A3222ADE23DEA0"); + return defsHash ? *defsHash : fallbackHash; } Json::Value const& From 0b675465b4e038e6080146043ad7fb2bfaf1a53e Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Wed, 19 Mar 2025 09:32:27 +0700 Subject: [PATCH 29/33] Fix ServerDefinitions_test regression intro in #475 (#477) --- src/ripple/rpc/handlers/ServerDefinitions.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/ripple/rpc/handlers/ServerDefinitions.cpp b/src/ripple/rpc/handlers/ServerDefinitions.cpp index e0a7d9c13..0138266b5 100644 --- a/src/ripple/rpc/handlers/ServerDefinitions.cpp +++ b/src/ripple/rpc/handlers/ServerDefinitions.cpp @@ -462,9 +462,13 @@ public: uint256 const& getHash() const { - static const uint256 fallbackHash( - "DF4220E93ADC6F5569063A01B4DC79F8DB9553B6A3222ADE23DEA0"); - return defsHash ? 
*defsHash : fallbackHash; + if (!defsHash) + { + static const uint256 fallbackHash( + "DF4220E93ADC6F5569063A01B4DC79F8DB9553B6A3222ADE23DEA0"); + return fallbackHash; + } + return *defsHash; } Json::Value const& From e84a36867b94059825f4f8dce84197f938684252 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Tue, 1 Apr 2025 16:47:48 +1000 Subject: [PATCH 30/33] Catalogue (#443) --- Builds/CMake/RippledCore.cmake | 3 + Builds/CMake/deps/Boost.cmake | 8 +- Builds/CMake/deps/FindBoost.cmake | 32 +- Builds/CMake/deps/gRPC.cmake | 7 +- CMakeLists.txt | 12 +- src/ripple/app/ledger/Ledger.cpp | 27 + src/ripple/app/ledger/Ledger.h | 13 + src/ripple/app/ledger/LedgerHistory.h | 2 - src/ripple/app/ledger/LedgerMaster.h | 14 +- src/ripple/app/ledger/impl/LedgerMaster.cpp | 69 +- src/ripple/app/misc/NetworkOPs.cpp | 2 + src/ripple/app/misc/SHAMapStoreImp.cpp | 1 + .../app/rdb/backend/impl/SQLiteDatabase.cpp | 221 +++- src/ripple/basics/base_uint.h | 12 + src/ripple/net/RPCErr.h | 2 +- src/ripple/net/impl/RPCErr.cpp | 8 +- .../nodestore/impl/DatabaseRotatingImp.cpp | 58 +- .../nodestore/impl/DatabaseRotatingImp.h | 3 + src/ripple/protocol/ErrorCodes.h | 5 +- src/ripple/protocol/impl/ErrorCodes.cpp | 3 +- src/ripple/protocol/jss.h | 166 ++- src/ripple/rpc/handlers/Catalogue.cpp | 1141 +++++++++++++++++ src/ripple/rpc/handlers/Handlers.h | 6 + src/ripple/rpc/impl/Handler.cpp | 3 + src/ripple/rpc/impl/RPCHelpers.cpp | 43 +- src/ripple/shamap/SHAMap.h | 30 + src/ripple/shamap/SHAMapTreeNode.h | 6 +- src/ripple/shamap/impl/SHAMap.cpp | 395 ++++++ src/test/rpc/Catalogue_test.cpp | 865 +++++++++++++ 29 files changed, 3045 insertions(+), 112 deletions(-) create mode 100644 src/ripple/rpc/handlers/Catalogue.cpp create mode 100644 src/test/rpc/Catalogue_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 17969c28f..8f44b97af 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -606,6 +606,7 @@ target_sources (rippled 
PRIVATE src/ripple/rpc/handlers/BlackList.cpp src/ripple/rpc/handlers/BookOffers.cpp src/ripple/rpc/handlers/CanDelete.cpp + src/ripple/rpc/handlers/Catalogue.cpp src/ripple/rpc/handlers/Connect.cpp src/ripple/rpc/handlers/ConsensusInfo.cpp src/ripple/rpc/handlers/CrawlShards.cpp @@ -661,6 +662,7 @@ target_sources (rippled PRIVATE src/ripple/rpc/handlers/ValidatorListSites.cpp src/ripple/rpc/handlers/Validators.cpp src/ripple/rpc/handlers/WalletPropose.cpp + src/ripple/rpc/handlers/Catalogue.cpp src/ripple/rpc/impl/DeliveredAmount.cpp src/ripple/rpc/impl/Handler.cpp src/ripple/rpc/impl/LegacyPathFind.cpp @@ -995,6 +997,7 @@ if (tests) src/test/rpc/AccountTx_test.cpp src/test/rpc/AmendmentBlocked_test.cpp src/test/rpc/Book_test.cpp + src/test/rpc/Catalogue_test.cpp src/test/rpc/DepositAuthorized_test.cpp src/test/rpc/DeliveredAmount_test.cpp src/test/rpc/Feature_test.cpp diff --git a/Builds/CMake/deps/Boost.cmake b/Builds/CMake/deps/Boost.cmake index 5038234bc..6469ba15d 100644 --- a/Builds/CMake/deps/Boost.cmake +++ b/Builds/CMake/deps/Boost.cmake @@ -1,14 +1,16 @@ #[===================================================================[ NIH dep: boost #]===================================================================] - if((NOT DEFINED BOOST_ROOT) AND(DEFINED ENV{BOOST_ROOT})) set(BOOST_ROOT $ENV{BOOST_ROOT}) endif() +if((NOT DEFINED BOOST_LIBRARYDIR) AND(DEFINED ENV{BOOST_LIBRARYDIR})) + set(BOOST_LIBRARYDIR $ENV{BOOST_LIBRARYDIR}) +endif() file(TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT) if(WIN32 OR CYGWIN) # Workaround for MSVC having two boost versions - x86 and x64 on same PC in stage folders - if(DEFINED BOOST_ROOT) + if((NOT DEFINED BOOST_LIBRARYDIR) AND (DEFINED BOOST_ROOT)) if(IS_DIRECTORY ${BOOST_ROOT}/stage64/lib) set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage64/lib) elseif(IS_DIRECTORY ${BOOST_ROOT}/stage/lib) @@ -55,6 +57,7 @@ find_package(Boost 1.86 REQUIRED program_options regex system + iostreams thread) add_library(ripple_boost INTERFACE) @@ -74,6 +77,7 
@@ target_link_libraries(ripple_boost Boost::coroutine Boost::date_time Boost::filesystem + Boost::iostreams Boost::program_options Boost::regex Boost::system diff --git a/Builds/CMake/deps/FindBoost.cmake b/Builds/CMake/deps/FindBoost.cmake index 121e72641..b55c78365 100644 --- a/Builds/CMake/deps/FindBoost.cmake +++ b/Builds/CMake/deps/FindBoost.cmake @@ -248,6 +248,7 @@ include(FindPackageHandleStandardArgs) # Save project's policies cmake_policy(PUSH) cmake_policy(SET CMP0057 NEW) # if IN_LIST +#cmake_policy(SET CMP0144 NEW) #------------------------------------------------------------------------------- # Before we go searching, check whether a boost cmake package is available, unless @@ -969,7 +970,24 @@ function(_Boost_COMPONENT_DEPENDENCIES component _ret) set(_Boost_WAVE_DEPENDENCIES filesystem serialization thread chrono date_time atomic) set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) endif() - if(NOT Boost_VERSION_STRING VERSION_LESS 1.77.0) + + # Special handling for Boost 1.86.0 and higher + if(NOT Boost_VERSION_STRING VERSION_LESS 1.86.0) + # Explicitly set these for Boost 1.86 + set(_Boost_IOSTREAMS_DEPENDENCIES "") # No dependencies for iostreams in 1.86 + + # Debug output to help diagnose the issue + if(Boost_DEBUG) + message(STATUS "Using special dependency settings for Boost 1.86.0+") + message(STATUS "Component: ${component}, uppercomponent: ${uppercomponent}") + message(STATUS "Boost_VERSION_STRING: ${Boost_VERSION_STRING}") + message(STATUS "BOOST_ROOT: $ENV{BOOST_ROOT}") + message(STATUS "BOOST_LIBRARYDIR: $ENV{BOOST_LIBRARYDIR}") + endif() + endif() + + # Only show warning for versions beyond what we've defined + if(NOT Boost_VERSION_STRING VERSION_LESS 1.87.0) message(WARNING "New Boost version may have incorrect or missing dependencies and imported targets") endif() endif() @@ -1879,6 +1897,18 @@ foreach(COMPONENT ${Boost_FIND_COMPONENTS}) list(INSERT _boost_LIBRARY_SEARCH_DIRS_RELEASE 0 ${Boost_LIBRARY_DIR_DEBUG}) endif() + if(NOT 
Boost_VERSION_STRING VERSION_LESS 1.86.0) + if(BOOST_LIBRARYDIR AND EXISTS "${BOOST_LIBRARYDIR}") + # Clear existing search paths and use only BOOST_LIBRARYDIR + set(_boost_LIBRARY_SEARCH_DIRS_RELEASE "${BOOST_LIBRARYDIR}" NO_DEFAULT_PATH) + set(_boost_LIBRARY_SEARCH_DIRS_DEBUG "${BOOST_LIBRARYDIR}" NO_DEFAULT_PATH) + + if(Boost_DEBUG) + message(STATUS "Boost 1.86: Setting library search dirs to BOOST_LIBRARYDIR: ${BOOST_LIBRARYDIR}") + endif() + endif() + endif() + # Avoid passing backslashes to _Boost_FIND_LIBRARY due to macro re-parsing. string(REPLACE "\\" "/" _boost_LIBRARY_SEARCH_DIRS_tmp "${_boost_LIBRARY_SEARCH_DIRS_RELEASE}") diff --git a/Builds/CMake/deps/gRPC.cmake b/Builds/CMake/deps/gRPC.cmake index 8dd094175..e4beaf89d 100644 --- a/Builds/CMake/deps/gRPC.cmake +++ b/Builds/CMake/deps/gRPC.cmake @@ -74,7 +74,11 @@ else () if (NOT _location) message (FATAL_ERROR "using pkg-config for grpc, can't find c-ares") endif () - add_library (c-ares::cares ${_static} IMPORTED GLOBAL) + if(${_location} MATCHES "\\.a$") + add_library(c-ares::cares STATIC IMPORTED GLOBAL) + else() + add_library(c-ares::cares SHARED IMPORTED GLOBAL) + endif() set_target_properties (c-ares::cares PROPERTIES IMPORTED_LOCATION ${_location} INTERFACE_INCLUDE_DIRECTORIES "${${_prefix}_INCLUDE_DIRS}" @@ -204,6 +208,7 @@ else () CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_STANDARD=17 $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> $<$:-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}> $<$:-DVCPKG_TARGET_TRIPLET=${VCPKG_TARGET_TRIPLET}> diff --git a/CMakeLists.txt b/CMakeLists.txt index d62541fad..25b530328 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,14 +1,18 @@ cmake_minimum_required (VERSION 3.16) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + if (POLICY CMP0074) cmake_policy(SET CMP0074 NEW) endif () -project (rippled) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_STANDARD 20) 
-set(CMAKE_CXX_STANDARD_REQUIRED ON) +if(POLICY CMP0144) + cmake_policy(SET CMP0144 NEW) +endif() +project (rippled) set(Boost_NO_BOOST_CMAKE ON) # make GIT_COMMIT_HASH define available to all sources diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 081ed9c8f..345c3bb28 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -305,6 +305,20 @@ Ledger::Ledger( } } +Ledger::Ledger( + LedgerInfo& info, + Config const& config, + Family& family, + SHAMap const& baseState) + : mImmutable(false) + , info_(info) + , txMap_(SHAMapType::TRANSACTION, family) + , stateMap_(baseState, true) + , rules_{config.features} + , j_(beast::Journal(beast::Journal::getNullSink())) +{ +} + // Create a new ledger that follows this one Ledger::Ledger(Ledger const& prevLedger, NetClock::time_point closeTime) : mImmutable(false) @@ -385,6 +399,19 @@ Ledger::setImmutable(bool rehash) setup(); } +// raw setters for catalogue +void +Ledger::setCloseFlags(int closeFlags) +{ + info_.closeFlags = closeFlags; +} + +void +Ledger::setDrops(uint64_t drops) +{ + info_.drops = drops; +} + void Ledger::setAccepted( NetClock::time_point closeTime, diff --git a/src/ripple/app/ledger/Ledger.h b/src/ripple/app/ledger/Ledger.h index 051b322e2..bf2d64ffd 100644 --- a/src/ripple/app/ledger/Ledger.h +++ b/src/ripple/app/ledger/Ledger.h @@ -121,6 +121,13 @@ public: Family& family, beast::Journal j); + // used when loading ledgers from catalogue files + Ledger( + LedgerInfo& info, + Config const& config, + Family& family, + SHAMap const& baseState); + /** Create a new ledger following a previous ledger The ledger will have the sequence number that @@ -275,6 +282,12 @@ public: void setImmutable(bool rehash = true); + void + setCloseFlags(int closeFlags); + + void + setDrops(uint64_t drops); + bool isImmutable() const { diff --git a/src/ripple/app/ledger/LedgerHistory.h b/src/ripple/app/ledger/LedgerHistory.h index 5733ca763..a50d7eabe 100644 --- 
a/src/ripple/app/ledger/LedgerHistory.h +++ b/src/ripple/app/ledger/LedgerHistory.h @@ -70,8 +70,6 @@ public: LedgerHash getLedgerHash(LedgerIndex ledgerIndex); - /** Remove stale cache entries - */ void sweep() { diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index 040ef3bf6..8735b30ca 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -128,7 +128,7 @@ public: getEarliestFetch(); bool - storeLedger(std::shared_ptr ledger); + storeLedger(std::shared_ptr ledger, bool pin = false); void setFullLedger( @@ -152,9 +152,15 @@ public: std::string getCompleteLedgers(); + std::string + getPinnedLedgers(); + RangeSet getCompleteLedgersRangeSet(); + RangeSet + getPinnedLedgersRangeSet(); + /** Apply held transactions to the open ledger This is normally called as we close the ledger. The open ledger remains open to handle new transactions @@ -200,7 +206,10 @@ public: getLedgerByHash(uint256 const& hash); void - setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV); + setLedgerRangePresent( + std::uint32_t minV, + std::uint32_t maxV, + bool pin = false /* if true, do not let these leaders be removed */); std::optional getCloseTimeBySeq(LedgerIndex ledgerIndex); @@ -373,6 +382,7 @@ private: std::recursive_mutex mCompleteLock; RangeSet mCompleteLedgers; + RangeSet mPinnedLedgers; // Track pinned ledger ranges // Publish thread is running. 
bool mAdvanceThread{false}; diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 4a3301a9c..35f56add3 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -533,11 +533,20 @@ LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) } bool -LedgerMaster::storeLedger(std::shared_ptr ledger) +LedgerMaster::storeLedger(std::shared_ptr ledger, bool pin) { bool validated = ledger->info().validated; + uint32_t seq = ledger->info().seq; // Returns true if we already had the ledger - return mLedgerHistory.insert(std::move(ledger), validated); + if (!mLedgerHistory.insert(std::move(ledger), validated)) + return false; + + if (pin) + { + mPinnedLedgers.insert(range(seq, seq)); + JLOG(m_journal.info()) << "Pinned ledger : " << seq; + } + return true; } /** Apply held transactions to the open ledger @@ -595,6 +604,15 @@ void LedgerMaster::clearLedger(std::uint32_t seq) { std::lock_guard sl(mCompleteLock); + + // Don't clear pinned ledgers + if (boost::icl::contains(mPinnedLedgers, seq)) + { + JLOG(m_journal.trace()) + << "Ledger " << seq << " is pinned, not clearing"; + return; + } + mCompleteLedgers.erase(seq); } @@ -1714,6 +1732,13 @@ LedgerMaster::getCompleteLedgers() return to_string(mCompleteLedgers); } +std::string +LedgerMaster::getPinnedLedgers() +{ + std::lock_guard sl(mCompleteLock); + return to_string(mPinnedLedgers); +} + RangeSet LedgerMaster::getCompleteLedgersRangeSet() { @@ -1721,6 +1746,13 @@ LedgerMaster::getCompleteLedgersRangeSet() return mCompleteLedgers; } +RangeSet +LedgerMaster::getPinnedLedgersRangeSet() +{ + std::lock_guard sl(mCompleteLock); + return mPinnedLedgers; +} + std::optional LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex) { @@ -1876,15 +1908,26 @@ LedgerMaster::getLedgerByHash(uint256 const& hash) } void -LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
+LedgerMaster::setLedgerRangePresent( + std::uint32_t minV, + std::uint32_t maxV, + bool pin) { std::lock_guard sl(mCompleteLock); mCompleteLedgers.insert(range(minV, maxV)); + + if (pin) + { + mPinnedLedgers.insert(range(minV, maxV)); + JLOG(m_journal.info()) + << "Pinned ledger range: " << minV << " - " << maxV; + } } void LedgerMaster::sweep() { + std::lock_guard sl(mCompleteLock); mLedgerHistory.sweep(); fetch_packs_.sweep(); } @@ -1899,8 +1942,24 @@ void LedgerMaster::clearPriorLedgers(LedgerIndex seq) { std::lock_guard sl(mCompleteLock); - if (seq > 0) - mCompleteLedgers.erase(range(0u, seq - 1)); + if (seq <= 0) + return; + + // First, save a copy of the pinned ledgers + auto pinnedCopy = mPinnedLedgers; + + // Clear everything before seq + RangeSet toClear; + toClear.insert(range(0u, seq - 1)); + for (auto const& interval : toClear) + mCompleteLedgers.erase(interval); + + // Re-add the pinned ledgers to ensure they're preserved + for (auto const& interval : pinnedCopy) + mCompleteLedgers.insert(interval); + + JLOG(m_journal.debug()) << "clearPriorLedgers: after restoration, pinned=" + << to_string(mPinnedLedgers); } void diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index df1b1ba08..6db57fda1 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2493,6 +2493,8 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) toBase58(TokenType::NodePublic, app_.nodeIdentity().first); info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers(); + info[jss::complete_ledgers_pinned] = + app_.getLedgerMaster().getPinnedLedgers(); if (amendmentBlocked_) info[jss::amendment_blocked] = true; diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 48347537b..1fd8ed1e6 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -209,6 +209,7 @@ SHAMapStoreImp::makeNodeStore(int 
readThreads) // Create NodeStore with two backends to allow online deletion of // data auto dbr = std::make_unique( + app_, scheduler_, readThreads, std::move(writableBackend), diff --git a/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp index 547ab843b..d07c2586c 100644 --- a/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp +++ b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp @@ -601,11 +601,44 @@ SQLiteDatabaseImp::deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) void SQLiteDatabaseImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) { + // Get a reference to the pinned ledgers set for quick lookups + RangeSet pinnedLedgers; + { + auto& ledgerMaster = app_.getLedgerMaster(); + // Use public API to get the pinned ledgers + pinnedLedgers = ledgerMaster.getPinnedLedgersRangeSet(); + } + if (existsLedger()) { auto db = checkoutLedger(); - detail::deleteBeforeLedgerSeq( - *db, detail::TableType::Ledgers, ledgerSeq); + + // Check if any ledgers in the range to be deleted are pinned + bool hasPinnedLedgers = false; + for (LedgerIndex seq = 1; seq < ledgerSeq && !hasPinnedLedgers; ++seq) + { + if (boost::icl::contains(pinnedLedgers, seq)) + hasPinnedLedgers = true; + } + + if (!hasPinnedLedgers) + { + // No pinned ledgers in the range, proceed with normal delete + detail::deleteBeforeLedgerSeq( + *db, detail::TableType::Ledgers, ledgerSeq); + } + else + { + // Delete ledgers individually, skipping pinned ones + for (LedgerIndex seq = 1; seq < ledgerSeq; ++seq) + { + if (!boost::icl::contains(pinnedLedgers, seq)) + { + detail::deleteByLedgerSeq( + *db, detail::TableType::Ledgers, seq); + } + } + } return; } @@ -614,8 +647,39 @@ SQLiteDatabaseImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) iterateLedgerBack( seqToShardIndex(ledgerSeq), [&](soci::session& session, std::uint32_t shardIndex) { - detail::deleteBeforeLedgerSeq( - session, detail::TableType::Ledgers, ledgerSeq); + LedgerIndex firstSeq = 
firstLedgerSeq(shardIndex); + LedgerIndex lastSeq = + std::min(lastLedgerSeq(shardIndex), ledgerSeq - 1); + + // Check if any ledgers in this shard's range are pinned + bool hasPinnedLedgers = false; + for (LedgerIndex seq = firstSeq; + seq <= lastSeq && !hasPinnedLedgers; + ++seq) + { + if (boost::icl::contains(pinnedLedgers, seq)) + hasPinnedLedgers = true; + } + + if (!hasPinnedLedgers) + { + // No pinned ledgers in this shard range, proceed with + // normal delete + detail::deleteBeforeLedgerSeq( + session, detail::TableType::Ledgers, ledgerSeq); + } + else + { + // Delete ledgers individually, skipping pinned ones + for (LedgerIndex seq = firstSeq; seq <= lastSeq; ++seq) + { + if (!boost::icl::contains(pinnedLedgers, seq)) + { + detail::deleteByLedgerSeq( + session, detail::TableType::Ledgers, seq); + } + } + } return true; }); } @@ -627,11 +691,43 @@ SQLiteDatabaseImp::deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) if (!useTxTables_) return; + // Get a reference to the pinned ledgers set for quick lookups + RangeSet pinnedLedgers; + { + auto& ledgerMaster = app_.getLedgerMaster(); + pinnedLedgers = ledgerMaster.getPinnedLedgersRangeSet(); + } + if (existsTransaction()) { auto db = checkoutTransaction(); - detail::deleteBeforeLedgerSeq( - *db, detail::TableType::Transactions, ledgerSeq); + + // Check if any ledgers in the range to be deleted are pinned + bool hasPinnedLedgers = false; + for (LedgerIndex seq = 1; seq < ledgerSeq && !hasPinnedLedgers; ++seq) + { + if (boost::icl::contains(pinnedLedgers, seq)) + hasPinnedLedgers = true; + } + + if (!hasPinnedLedgers) + { + // No pinned ledgers in the range, proceed with normal delete + detail::deleteBeforeLedgerSeq( + *db, detail::TableType::Transactions, ledgerSeq); + } + else + { + // Delete transaction data individually, skipping pinned ledgers + for (LedgerIndex seq = 1; seq < ledgerSeq; ++seq) + { + if (!boost::icl::contains(pinnedLedgers, seq)) + { + detail::deleteByLedgerSeq( + *db, 
detail::TableType::Transactions, seq); + } + } + } return; } @@ -640,8 +736,40 @@ SQLiteDatabaseImp::deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) iterateTransactionBack( seqToShardIndex(ledgerSeq), [&](soci::session& session, std::uint32_t shardIndex) { - detail::deleteBeforeLedgerSeq( - session, detail::TableType::Transactions, ledgerSeq); + LedgerIndex firstSeq = firstLedgerSeq(shardIndex); + LedgerIndex lastSeq = + std::min(lastLedgerSeq(shardIndex), ledgerSeq - 1); + + // Check if any ledgers in this shard's range are pinned + bool hasPinnedLedgers = false; + for (LedgerIndex seq = firstSeq; + seq <= lastSeq && !hasPinnedLedgers; + ++seq) + { + if (boost::icl::contains(pinnedLedgers, seq)) + hasPinnedLedgers = true; + } + + if (!hasPinnedLedgers) + { + // No pinned ledgers in this shard range, proceed with + // normal delete + detail::deleteBeforeLedgerSeq( + session, detail::TableType::Transactions, ledgerSeq); + } + else + { + // Delete transaction data individually, skipping pinned + // ledgers + for (LedgerIndex seq = firstSeq; seq <= lastSeq; ++seq) + { + if (!boost::icl::contains(pinnedLedgers, seq)) + { + detail::deleteByLedgerSeq( + session, detail::TableType::Transactions, seq); + } + } + } return true; }); } @@ -654,11 +782,44 @@ SQLiteDatabaseImp::deleteAccountTransactionsBeforeLedgerSeq( if (!useTxTables_) return; + // Get a reference to the pinned ledgers set for quick lookups + RangeSet pinnedLedgers; + { + auto& ledgerMaster = app_.getLedgerMaster(); + pinnedLedgers = ledgerMaster.getPinnedLedgersRangeSet(); + } + if (existsTransaction()) { auto db = checkoutTransaction(); - detail::deleteBeforeLedgerSeq( - *db, detail::TableType::AccountTransactions, ledgerSeq); + + // Check if any ledgers in the range to be deleted are pinned + bool hasPinnedLedgers = false; + for (LedgerIndex seq = 1; seq < ledgerSeq && !hasPinnedLedgers; ++seq) + { + if (boost::icl::contains(pinnedLedgers, seq)) + hasPinnedLedgers = true; + } + + if 
(!hasPinnedLedgers) + { + // No pinned ledgers in the range, proceed with normal delete + detail::deleteBeforeLedgerSeq( + *db, detail::TableType::AccountTransactions, ledgerSeq); + } + else + { + // Delete account transaction data individually, skipping pinned + // ledgers + for (LedgerIndex seq = 1; seq < ledgerSeq; ++seq) + { + if (!boost::icl::contains(pinnedLedgers, seq)) + { + detail::deleteByLedgerSeq( + *db, detail::TableType::AccountTransactions, seq); + } + } + } return; } @@ -667,8 +828,44 @@ SQLiteDatabaseImp::deleteAccountTransactionsBeforeLedgerSeq( iterateTransactionBack( seqToShardIndex(ledgerSeq), [&](soci::session& session, std::uint32_t shardIndex) { - detail::deleteBeforeLedgerSeq( - session, detail::TableType::AccountTransactions, ledgerSeq); + LedgerIndex firstSeq = firstLedgerSeq(shardIndex); + LedgerIndex lastSeq = + std::min(lastLedgerSeq(shardIndex), ledgerSeq - 1); + + // Check if any ledgers in this shard's range are pinned + bool hasPinnedLedgers = false; + for (LedgerIndex seq = firstSeq; + seq <= lastSeq && !hasPinnedLedgers; + ++seq) + { + if (boost::icl::contains(pinnedLedgers, seq)) + hasPinnedLedgers = true; + } + + if (!hasPinnedLedgers) + { + // No pinned ledgers in this shard range, proceed with + // normal delete + detail::deleteBeforeLedgerSeq( + session, + detail::TableType::AccountTransactions, + ledgerSeq); + } + else + { + // Delete account transaction data individually, skipping + // pinned ledgers + for (LedgerIndex seq = firstSeq; seq <= lastSeq; ++seq) + { + if (!boost::icl::contains(pinnedLedgers, seq)) + { + detail::deleteByLedgerSeq( + session, + detail::TableType::AccountTransactions, + seq); + } + } + } return true; }); } diff --git a/src/ripple/basics/base_uint.h b/src/ripple/basics/base_uint.h index 93c5df8d6..40c6ca4f9 100644 --- a/src/ripple/basics/base_uint.h +++ b/src/ripple/basics/base_uint.h @@ -129,6 +129,18 @@ public: return reinterpret_cast(data_.data()); } + char const* + cdata() const + { + return 
reinterpret_cast(data_.data()); + } + + char* + cdata() + { + return reinterpret_cast(data_.data()); + } + iterator begin() { diff --git a/src/ripple/net/RPCErr.h b/src/ripple/net/RPCErr.h index e49e96b3d..bea3729c8 100644 --- a/src/ripple/net/RPCErr.h +++ b/src/ripple/net/RPCErr.h @@ -28,7 +28,7 @@ namespace ripple { bool isRpcError(Json::Value jvResult); Json::Value -rpcError(int iError); +rpcError(int iError, std::string msg = ""); } // namespace ripple diff --git a/src/ripple/net/impl/RPCErr.cpp b/src/ripple/net/impl/RPCErr.cpp index 8af2a248c..47fdaa220 100644 --- a/src/ripple/net/impl/RPCErr.cpp +++ b/src/ripple/net/impl/RPCErr.cpp @@ -26,10 +26,14 @@ struct RPCErr; // VFALCO NOTE Deprecated function Json::Value -rpcError(int iError) +rpcError(int iError, std::string msg) { Json::Value jvResult(Json::objectValue); - RPC::inject_error(iError, jvResult); + if (msg != "") + RPC::inject_error(static_cast(iError), msg, jvResult); + else + RPC::inject_error(iError, jvResult); + return jvResult; } diff --git a/src/ripple/nodestore/impl/DatabaseRotatingImp.cpp b/src/ripple/nodestore/impl/DatabaseRotatingImp.cpp index 267b4ee58..aa8f89962 100644 --- a/src/ripple/nodestore/impl/DatabaseRotatingImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseRotatingImp.cpp @@ -18,6 +18,8 @@ //============================================================================== #include +#include +#include #include #include @@ -25,6 +27,7 @@ namespace ripple { namespace NodeStore { DatabaseRotatingImp::DatabaseRotatingImp( + Application& app, Scheduler& scheduler, int readThreads, std::shared_ptr writableBackend, @@ -32,6 +35,7 @@ DatabaseRotatingImp::DatabaseRotatingImp( Section const& config, beast::Journal j) : DatabaseRotating(scheduler, readThreads, config, j) + , app_(app) , writableBackend_(std::move(writableBackend)) , archiveBackend_(std::move(archiveBackend)) { @@ -48,8 +52,58 @@ DatabaseRotatingImp::rotateWithLock( { std::lock_guard lock(mutex_); + // Create the new backend auto 
newBackend = f(writableBackend_->getName()); + + // Before rotating, ensure all pinned ledgers are in the writable backend + JLOG(j_.info()) + << "Ensuring pinned ledgers are preserved before backend rotation"; + + // Use a lambda to handle the preservation of pinned ledgers + auto ensurePinnedLedgersInWritable = [this]() { + // Get list of pinned ledgers + auto pinnedLedgers = app_.getLedgerMaster().getPinnedLedgersRangeSet(); + + for (auto const& range : pinnedLedgers) + { + for (auto seq = range.lower(); seq <= range.upper(); ++seq) + { + uint256 hash = app_.getLedgerMaster().getHashBySeq(seq); + if (hash.isZero()) + continue; + + // Try to load the ledger + auto ledger = app_.getLedgerMaster().getLedgerByHash(hash); + if (ledger && ledger->isImmutable()) + { + // If we have the ledger, store it in the writable backend + JLOG(j_.debug()) << "Ensuring pinned ledger " << seq + << " is in writable backend"; + Database::storeLedger(*ledger, writableBackend_); + } + else + { + // If we don't have the ledger in memory, try to fetch its + // objects directly + JLOG(j_.debug()) << "Attempting to copy pinned ledger " + << seq << " header to writable backend"; + std::shared_ptr headerObj; + Status status = + archiveBackend_->fetch(hash.data(), &headerObj); + if (status == ok && headerObj) + writableBackend_->store(headerObj); + } + } + } + }; + + // Execute the lambda + ensurePinnedLedgersInWritable(); + + // Now it's safe to mark the archive backend for deletion archiveBackend_->setDeletePath(); + + // Complete the rotation archiveBackend_ = std::move(writableBackend_); writableBackend_ = std::move(newBackend); } @@ -180,8 +234,8 @@ DatabaseRotatingImp::fetchNodeObject( } // Update writable backend with data from the archive backend - if (duplicate) - writable->store(nodeObject); + // if (duplicate) + writable->store(nodeObject); } } diff --git a/src/ripple/nodestore/impl/DatabaseRotatingImp.h b/src/ripple/nodestore/impl/DatabaseRotatingImp.h index b2807eeab..269d45c5b 
100644 --- a/src/ripple/nodestore/impl/DatabaseRotatingImp.h +++ b/src/ripple/nodestore/impl/DatabaseRotatingImp.h @@ -33,7 +33,10 @@ public: DatabaseRotatingImp& operator=(DatabaseRotatingImp const&) = delete; + Application& app_; + DatabaseRotatingImp( + Application& app, Scheduler& scheduler, int readThreads, std::shared_ptr writableBackend, diff --git a/src/ripple/protocol/ErrorCodes.h b/src/ripple/protocol/ErrorCodes.h index 8c959e9a0..311ba3775 100644 --- a/src/ripple/protocol/ErrorCodes.h +++ b/src/ripple/protocol/ErrorCodes.h @@ -143,8 +143,9 @@ enum error_code_i { rpcOBJECT_NOT_FOUND = 92, - rpcLAST = - rpcOBJECT_NOT_FOUND // rpcLAST should always equal the last code.= + rpcLEDGER_MISSING = 93, + + rpcLAST = rpcLEDGER_MISSING // rpcLAST should always equal the last code.= }; /** Codes returned in the `warnings` array of certain RPC commands. diff --git a/src/ripple/protocol/impl/ErrorCodes.cpp b/src/ripple/protocol/impl/ErrorCodes.cpp index bc31b21b8..585220710 100644 --- a/src/ripple/protocol/impl/ErrorCodes.cpp +++ b/src/ripple/protocol/impl/ErrorCodes.cpp @@ -109,7 +109,8 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcTOO_BUSY, "tooBusy", "The server is too busy to help you now.", 503}, {rpcTXN_NOT_FOUND, "txnNotFound", "Transaction not found.", 404}, {rpcNAMESPACE_NOT_FOUND, "namespaceNotFound", "Namespace not found.", 404}, - {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method.", 405}}; + {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method.", 405}, + {rpcLEDGER_MISSING, "ledgerMissing", "One or more ledgers in the specified range is missing", 406}}; // clang-format on // Sort and validate unorderedErrorInfos at compile time. 
Should be diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 963434090..d493f33b9 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -199,20 +199,21 @@ JSS(balances); // out: GatewayBalances JSS(base); // out: LogLevel JSS(base_fee); // out: NetworkOPs JSS(base_fee_no_hooks); -JSS(base_fee_xrp); // out: NetworkOPs -JSS(base_fee_native); // out: NetworkOPs -JSS(bids); // out: Subscribe -JSS(binary); // in: AccountTX, LedgerEntry, - // AccountTxOld, Tx LedgerData -JSS(blob); // out: ValidatorList -JSS(blobs_v2); // out: ValidatorList - // in: UNL -JSS(books); // in: Subscribe, Unsubscribe -JSS(both); // in: Subscribe, Unsubscribe -JSS(both_sides); // in: Subscribe, Unsubscribe -JSS(broadcast); // out: SubmitTransaction -JSS(build_path); // in: TransactionSign -JSS(build_version); // out: NetworkOPs +JSS(base_fee_xrp); // out: NetworkOPs +JSS(base_fee_native); // out: NetworkOPs +JSS(bids); // out: Subscribe +JSS(binary); // in: AccountTX, LedgerEntry, + // AccountTxOld, Tx LedgerData +JSS(blob); // out: ValidatorList +JSS(blobs_v2); // out: ValidatorList + // in: UNL +JSS(books); // in: Subscribe, Unsubscribe +JSS(both); // in: Subscribe, Unsubscribe +JSS(both_sides); // in: Subscribe, Unsubscribe +JSS(broadcast); // out: SubmitTransaction +JSS(build_path); // in: TransactionSign +JSS(build_version); // out: NetworkOPs +JSS(bytes_written); JSS(cancel_after); // out: AccountChannels JSS(can_delete); // out: CanDelete JSS(changes); // out: BookChanges @@ -237,13 +238,15 @@ JSS(code); // out: errors JSS(command); // in: RPCHandler JSS(complete); // out: NetworkOPs, InboundLedger JSS(complete_ledgers); // out: NetworkOPs, PeerImp -JSS(complete_shards); // out: OverlayImpl, PeerImp -JSS(consensus); // out: NetworkOPs, LedgerConsensus -JSS(converge_time); // out: NetworkOPs -JSS(converge_time_s); // out: NetworkOPs -JSS(cookie); // out: NetworkOPs -JSS(count); // in: AccountTx*, ValidatorList -JSS(counters); // in/out: retrieve 
counters +JSS(complete_ledgers_pinned); +JSS(complete_shards); // out: OverlayImpl, PeerImp +JSS(compression_level); +JSS(consensus); // out: NetworkOPs, LedgerConsensus +JSS(converge_time); // out: NetworkOPs +JSS(converge_time_s); // out: NetworkOPs +JSS(cookie); // out: NetworkOPs +JSS(count); // in: AccountTx*, ValidatorList +JSS(counters); // in/out: retrieve counters JSS(coins); JSS(children); JSS(ctid); // in/out: Tx RPC @@ -257,7 +260,8 @@ JSS(currency); // in: paths/PathRequest, STAmount // AccountLines JSS(current); // out: OwnerInfo JSS(current_activities); -JSS(current_ledger_size); // out: TxQ +JSS(current_ledger_size); // out: TxQ +JSS(current_ledger); JSS(current_queue_size); // out: TxQ JSS(data); // out: LedgerData JSS(date); // out: tx/Transaction, NetworkOPs @@ -289,18 +293,20 @@ JSS(drops); // out: TxQ JSS(duration_us); // out: NetworkOPs JSS(effective); // out: ValidatorList // in: UNL -JSS(enabled); // out: AmendmentTable -JSS(engine_result); // out: NetworkOPs, TransactionSign, Submit -JSS(engine_result_code); // out: NetworkOPs, TransactionSign, Submit -JSS(engine_result_message); // out: NetworkOPs, TransactionSign, Submit -JSS(ephemeral_key); // out: ValidatorInfo - // in/out: Manifest -JSS(error); // out: error +JSS(elapsed_seconds); +JSS(enabled); // out: AmendmentTable +JSS(engine_result); // out: NetworkOPs, TransactionSign, Submit +JSS(engine_result_code); // out: NetworkOPs, TransactionSign, Submit +JSS(engine_result_message); // out: NetworkOPs, TransactionSign, Submit +JSS(ephemeral_key); // out: ValidatorInfo + // in/out: Manifest +JSS(error); // out: error JSS(errored); -JSS(error_code); // out: error -JSS(error_exception); // out: Submit -JSS(error_message); // out: error -JSS(escrow); // in: LedgerEntry +JSS(error_code); // out: error +JSS(error_exception); // out: Submit +JSS(error_message); // out: error +JSS(escrow); // in: LedgerEntry +JSS(estimated_time_remaining); JSS(emitted_txn); // in: LedgerEntry JSS(expand); // in: 
handler/Ledger JSS(expected_date); // out: any (warnings) @@ -310,6 +316,7 @@ JSS(expiration); // out: AccountOffers, AccountChannels, // ValidatorList JSS(fail_hard); // in: Sign, Submit JSS(failed); // out: InboundLedger +JSS(failed_ledgers); // out: catalogue JSS(feature); // in: Feature JSS(features); // out: Feature JSS(fee); // out: NetworkOPs, Peers @@ -324,9 +331,12 @@ JSS(first); // out: rpc/Version JSS(firstSequence); // out: NodeToShardStatus JSS(firstShardIndex); // out: NodeToShardStatus JSS(finished); -JSS(fix_txns); // in: LedgerCleaner +JSS(fix_txns); // in: LedgerCleaner +JSS(file); +JSS(file_size); JSS(flags); // out: AccountOffers, // NetworkOPs +JSS(force); // in: catalogue JSS(forward); // in: AccountTx JSS(freeze); // out: AccountLines JSS(freeze_peer); // out: AccountLines @@ -336,6 +346,7 @@ JSS(full_reply); // out: PathFind JSS(fullbelow_size); // out: GetCounts JSS(good); // out: RPCVersion JSS(hash); // out: NetworkOPs, InboundLedger, +JSS(hash_mismatches); // out: catalogue // LedgerToJson, STTx; field JSS(hashes); // in: AccountObjects JSS(have_header); // out: InboundLedger @@ -354,8 +365,10 @@ JSS(id); // websocket. JSS(ident); // in: AccountCurrencies, AccountInfo, // OwnerInfo JSS(ignore_default); // in: AccountLines -JSS(import_vlseq); // in: LedgerEntry -JSS(inLedger); // out: tx/Transaction +JSS(ignore_hash); +JSS(import_vlseq); // in: LedgerEntry +JSS(imported); // out: catalogue +JSS(inLedger); // out: tx/Transaction JSS(in_queue); JSS(inbound); // out: PeerImp JSS(index); // in: LedgerEntry, DownloadShard @@ -374,42 +387,48 @@ JSS(issuer); // in: RipplePathFind, Subscribe, // out: STPathSet, STAmount JSS(job); JSS(job_queue); +JSS(job_type); +JSS(job_status); JSS(jobs); -JSS(jsonrpc); // json version -JSS(jq_trans_overflow); // JobQueue transaction limit overflow. 
-JSS(kept); // out: SubmitTransaction -JSS(key); // out -JSS(key_type); // in/out: WalletPropose, TransactionSign -JSS(latency); // out: PeerImp -JSS(last); // out: RPCVersion -JSS(lastSequence); // out: NodeToShardStatus -JSS(lastShardIndex); // out: NodeToShardStatus -JSS(last_close); // out: NetworkOPs -JSS(last_refresh_time); // out: ValidatorSite -JSS(last_refresh_status); // out: ValidatorSite -JSS(last_refresh_message); // out: ValidatorSite -JSS(ledger); // in: NetworkOPs, LedgerCleaner, - // RPCHelpers - // out: NetworkOPs, PeerImp -JSS(ledger_current_index); // out: NetworkOPs, RPCHelpers, - // LedgerCurrent, LedgerAccept, - // AccountLines -JSS(ledger_data); // out: LedgerHeader -JSS(ledger_hash); // in: RPCHelpers, LedgerRequest, - // RipplePathFind, TransactionEntry, - // handlers/Ledger - // out: NetworkOPs, RPCHelpers, - // LedgerClosed, LedgerData, - // AccountLines -JSS(ledger_hit_rate); // out: GetCounts -JSS(ledger_index); // in/out: many -JSS(ledger_index_max); // in, out: AccountTx* -JSS(ledger_index_min); // in, out: AccountTx* -JSS(ledger_max); // in, out: AccountTx* -JSS(ledger_min); // in, out: AccountTx* -JSS(ledger_time); // out: NetworkOPs -JSS(LEDGER_ENTRY_TYPES); // out: RPC server_definitions -JSS(levels); // LogLevels +JSS(jsonrpc); // json version +JSS(jq_trans_overflow); // JobQueue transaction limit overflow. 
+JSS(kept); // out: SubmitTransaction +JSS(key); // out +JSS(key_type); // in/out: WalletPropose, TransactionSign +JSS(latency); // out: PeerImp +JSS(last); // out: RPCVersion +JSS(lastSequence); // out: NodeToShardStatus +JSS(lastShardIndex); // out: NodeToShardStatus +JSS(last_close); // out: NetworkOPs +JSS(last_refresh_time); // out: ValidatorSite +JSS(last_refresh_status); // out: ValidatorSite +JSS(last_refresh_message); // out: ValidatorSite +JSS(ledger); // in: NetworkOPs, LedgerCleaner, + // RPCHelpers + // out: NetworkOPs, PeerImp +JSS(ledger_count); +JSS(ledgers_loaded); +JSS(ledgers_written); +JSS(ledger_current_index); // out: NetworkOPs, RPCHelpers, + // LedgerCurrent, LedgerAccept, + // AccountLines +JSS(ledger_data); // out: LedgerHeader +JSS(ledger_hash); // in: RPCHelpers, LedgerRequest, + // RipplePathFind, TransactionEntry, + // handlers/Ledger + // out: NetworkOPs, RPCHelpers, + // LedgerClosed, LedgerData, + // AccountLines +JSS(ledger_hit_rate); // out: GetCounts +JSS(ledger_index); // in/out: many +JSS(ledger_index_max); // in, out: AccountTx* +JSS(ledger_index_min); // in, out: AccountTx* +JSS(ledger_max); // in, out: AccountTx* +JSS(ledger_min); // in, out: AccountTx* +JSS(ledger_time); // out: NetworkOPs +JSS(LEDGER_ENTRY_TYPES); // out: RPC server_definitions +JSS(levels); // LogLevels +JSS(level); JSS(limit); // in/out: AccountTx*, AccountOffers, // AccountLines, AccountObjects // in: LedgerData, BookOffers @@ -527,6 +546,8 @@ JSS(password); // in: Subscribe JSS(paths); // in: RipplePathFind JSS(paths_canonical); // out: RipplePathFind JSS(paths_computed); // out: PathRequest, RipplePathFind +JSS(output_file); // in: CatalogueCreate +JSS(input_file); // in: CatalogueLoad JSS(payment_channel); // in: LedgerEntry JSS(pclose); JSS(peer); // in: AccountLines @@ -536,6 +557,7 @@ JSS(peers); // out: InboundLedger, handlers/Peers, Overlay JSS(peer_disconnects); // Severed peer connection counter. 
JSS(peer_disconnects_resources); // Severed peer connections because of // excess resource consumption. +JSS(percent_complete); JSS(phash); JSS(port); // in: Connect JSS(previous); // out: Reservations @@ -618,6 +640,7 @@ JSS(signing_keys); // out: ValidatorList JSS(signing_time); // out: NetworkOPs JSS(signer_list); // in: AccountObjects JSS(signer_lists); // in/out: AccountInfo +JSS(skipped); // out: catalogue JSS(snapshot); // in: Subscribe JSS(source_account); // in: PathRequest, RipplePathFind JSS(source_amount); // in: PathRequest, RipplePathFind @@ -625,6 +648,7 @@ JSS(source_currencies); // in: PathRequest, RipplePathFind JSS(source_tag); // out: AccountChannels JSS(stand_alone); // out: NetworkOPs JSS(start); // in: TxHistory +JSS(start_time); JSS(started); JSS(state); // out: Logic.h, ServerState, LedgerData JSS(state_accounting); // out: NetworkOPs diff --git a/src/ripple/rpc/handlers/Catalogue.cpp b/src/ripple/rpc/handlers/Catalogue.cpp new file mode 100644 index 000000000..28768d50d --- /dev/null +++ b/src/ripple/rpc/handlers/Catalogue.cpp @@ -0,0 +1,1141 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012-2014 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace ripple { + +using time_point = NetClock::time_point; +using duration = NetClock::duration; + +#define CATL 0x4C544143UL /*"CATL" in LE*/ + +// Replace the current version constant +static constexpr uint16_t CATALOGUE_VERSION = 1; + +// Instead use these definitions +static constexpr uint16_t CATALOGUE_VERSION_MASK = + 0x00FF; // Lower 8 bits for version +static constexpr uint16_t CATALOGUE_COMPRESS_LEVEL_MASK = + 0x0F00; // Bits 8-11: compression level +static constexpr uint16_t CATALOGUE_RESERVED_MASK = + 0xF000; // Bits 12-15: reserved + +// Helper functions for version field manipulation +inline uint8_t +getCatalogueVersion(uint16_t versionField) +{ + return versionField & CATALOGUE_VERSION_MASK; +} + +inline uint8_t +getCompressionLevel(uint16_t versionField) +{ + return (versionField & CATALOGUE_COMPRESS_LEVEL_MASK) >> 8; +} + +inline bool +isCompressed(uint16_t versionField) +{ + return getCompressionLevel(versionField) > 0; +} + +inline uint16_t +makeCatalogueVersionField(uint8_t version, uint8_t compressionLevel = 0) +{ // 0 = no compression + + // Ensure compression level is within valid range (0-9) + if (compressionLevel > 9) + compressionLevel = 9; + + uint16_t result = version & CATALOGUE_VERSION_MASK; + result |= 
(compressionLevel << 8); // Store level in bits 8-11 + return result; +} + +// Helper function to convert binary hash to hex string +std::string +toHexString(unsigned char const* data, size_t len) +{ + static char const* hexDigits = "0123456789ABCDEF"; + std::string result; + result.reserve(2 * len); + for (size_t i = 0; i < len; ++i) + { + unsigned char c = data[i]; + result.push_back(hexDigits[c >> 4]); + result.push_back(hexDigits[c & 15]); + } + return result; +} + +#pragma pack(push, 1) // pack the struct tightly +struct CATLHeader +{ + uint32_t magic = CATL; + uint32_t min_ledger; + uint32_t max_ledger; + uint16_t version; + uint16_t network_id; + uint64_t filesize = 0; // Total size of the file including header + std::array hash = {}; // SHA-512 hash, initially set to zeros +}; +#pragma pack(pop) + +enum class CatalogueJobType { CREATE, LOAD }; + +struct CatalogueRunStatus +{ + bool isRunning = false; + std::chrono::system_clock::time_point started; + uint32_t minLedger; + uint32_t maxLedger; + uint32_t ledgerUpto; + CatalogueJobType jobType; + std::string filename; + uint8_t compressionLevel = 0; + std::string hash; // Hex-encoded hash + uint64_t filesize = 0; // File size in bytes +}; + +// Global status for catalogue operations +static std::shared_mutex + catalogueStatusMutex; // Protects access to the status object +static CatalogueRunStatus catalogueRunStatus; // Always in memory + +// Macro to simplify common patterns +#define UPDATE_CATALOGUE_STATUS(field, value) \ + { \ + std::unique_lock writeLock(catalogueStatusMutex); \ + catalogueRunStatus.field = value; \ + } + +// Helper function to generate status JSON +// IMPORTANT: Caller must hold at least a shared (read) lock on +// catalogueStatusMutex before calling this function +inline Json::Value +generateStatusJson(bool includeErrorInfo = false) +{ + Json::Value jvResult; + + if (catalogueRunStatus.isRunning) + { + jvResult[jss::job_status] = "job_in_progress"; + jvResult[jss::min_ledger] = 
catalogueRunStatus.minLedger; + jvResult[jss::max_ledger] = catalogueRunStatus.maxLedger; + jvResult[jss::current_ledger] = catalogueRunStatus.ledgerUpto; + + // Calculate percentage complete - FIX: Handle ledgerUpto = 0 case + // properly + uint32_t total_ledgers = + catalogueRunStatus.maxLedger - catalogueRunStatus.minLedger + 1; + + // If ledgerUpto is 0, it means no progress has been made yet + uint32_t processed_ledgers = (catalogueRunStatus.ledgerUpto == 0) + ? 0 + : catalogueRunStatus.ledgerUpto - catalogueRunStatus.minLedger + 1; + + if (processed_ledgers > total_ledgers) + processed_ledgers = total_ledgers; // Safety check + + int percentage = (total_ledgers > 0) + ? static_cast((processed_ledgers * 100) / total_ledgers) + : 0; + jvResult[jss::percent_complete] = percentage; + + // Calculate elapsed time + auto now = std::chrono::system_clock::now(); + auto elapsed = std::chrono::duration_cast( + now - catalogueRunStatus.started) + .count(); + jvResult[jss::elapsed_seconds] = static_cast(elapsed); + + // Calculate estimated time remaining + if (processed_ledgers > 0 && total_ledgers > processed_ledgers) + { + // Calculate rate: ledgers per second + double ledgers_per_second = + static_cast(processed_ledgers) / elapsed; + + if (ledgers_per_second > 0) + { + // Calculate remaining time in seconds + uint32_t remaining_ledgers = total_ledgers - processed_ledgers; + uint64_t estimated_seconds_remaining = static_cast( + remaining_ledgers / ledgers_per_second); + + // Format the time remaining in human-readable form + std::string time_remaining; + if (estimated_seconds_remaining > 3600) + { + // Hours and minutes + uint64_t hours = estimated_seconds_remaining / 3600; + uint64_t minutes = + (estimated_seconds_remaining % 3600) / 60; + time_remaining = std::to_string(hours) + " hour" + + (hours > 1 ? "s" : "") + " " + std::to_string(minutes) + + " minute" + (minutes > 1 ? 
"s" : ""); + } + else if (estimated_seconds_remaining > 60) + { + // Minutes and seconds + uint64_t minutes = estimated_seconds_remaining / 60; + uint64_t seconds = estimated_seconds_remaining % 60; + time_remaining = std::to_string(minutes) + " minute" + + (minutes > 1 ? "s" : "") + " " + + std::to_string(seconds) + " second" + + (seconds > 1 ? "s" : ""); + } + else + { + // Just seconds + time_remaining = + std::to_string(estimated_seconds_remaining) + + " second" + + (estimated_seconds_remaining > 1 ? "s" : ""); + } + jvResult[jss::estimated_time_remaining] = time_remaining; + } + else + { + jvResult[jss::estimated_time_remaining] = "unknown"; + } + } + else + { + jvResult[jss::estimated_time_remaining] = "unknown"; + } + + // Add start time as ISO 8601 string + auto time_t_started = + std::chrono::system_clock::to_time_t(catalogueRunStatus.started); + std::tm* tm_started = std::gmtime(&time_t_started); + char time_buffer[30]; + std::strftime( + time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%SZ", tm_started); + jvResult[jss::start_time] = time_buffer; + + // Add job type + jvResult[jss::job_type] = + (catalogueRunStatus.jobType == CatalogueJobType::CREATE) + ? 
"catalogue_create" + : "catalogue_load"; + + // Add filename + jvResult[jss::file] = catalogueRunStatus.filename; + + // Add compression level if applicable + if (catalogueRunStatus.compressionLevel > 0) + { + jvResult[jss::compression_level] = + catalogueRunStatus.compressionLevel; + } + + // Add hash if available + if (!catalogueRunStatus.hash.empty()) + { + jvResult[jss::hash] = catalogueRunStatus.hash; + } + + // Add filesize if available + if (catalogueRunStatus.filesize > 0) + { + jvResult[jss::file_size] = Json::UInt(catalogueRunStatus.filesize); + } + + if (includeErrorInfo) + { + jvResult[jss::error] = "busy"; + jvResult[jss::error_message] = + "Another catalogue operation is in progress"; + } + } + else + { + jvResult[jss::job_status] = "no_job_running"; + } + + return jvResult; +} + +Json::Value +doCatalogueStatus(RPC::JsonContext& context) +{ + // Use a shared lock (read lock) to check status without blocking other + // readers + std::shared_lock lock(catalogueStatusMutex); + return generateStatusJson(); +} + +Json::Value +doCatalogueCreate(RPC::JsonContext& context) +{ + // Try to acquire write lock to check if an operation is running + { + std::unique_lock writeLock( + catalogueStatusMutex, std::try_to_lock); + if (!writeLock.owns_lock()) + { + // Couldn't get the lock, so another thread is accessing the status + // Try a shared lock to get the status + std::shared_lock readLock(catalogueStatusMutex); + return generateStatusJson(true); + } + + // We have the write lock, check if an operation is already running + if (catalogueRunStatus.isRunning) + { + return generateStatusJson(true); + } + + // No operation running, set up our operation + catalogueRunStatus.isRunning = true; + } + // Write lock is released here, allowing status checks while operation runs + + // Ensure we reset the running flag when we're done + struct OpCleanup + { + ~OpCleanup() + { + std::unique_lock writeLock(catalogueStatusMutex); + catalogueRunStatus.isRunning = false; + } + } 
opCleanup; + + if (!context.params.isMember(jss::min_ledger) || + !context.params.isMember(jss::max_ledger)) + return rpcError( + rpcINVALID_PARAMS, "expected min_ledger and max_ledger"); + + std::string filepath; + struct stat st; + uint64_t file_size = 0; + + if (!context.params.isMember(jss::output_file) || + (filepath = context.params[jss::output_file].asString()).empty() || + filepath.front() != '/') + return rpcError( + rpcINVALID_PARAMS, + "expected output_file: "); + + uint8_t compressionLevel = 0; // Default: no compression + + if (context.params.isMember(jss::compression_level)) + { + if (context.params[jss::compression_level].isInt() || + context.params[jss::compression_level].isUInt()) + { + // Handle numeric value between 0 and 9 + compressionLevel = context.params[jss::compression_level].asUInt(); + if (compressionLevel > 9) + compressionLevel = 9; + } + else if (context.params[jss::compression_level].isBool()) + { + // Handle boolean: true means 6, false means 0 + compressionLevel = + context.params[jss::compression_level].asBool() ? 
6 : 0; + } + } + + // Check output file isn't already populated and can be written to + { + struct stat st; + if (stat(filepath.c_str(), &st) == 0) + { // file exists + if (st.st_size > 0) + return rpcError( + rpcINVALID_PARAMS, + "output_file already exists and is non-empty"); + } + else if (errno != ENOENT) + return rpcError( + rpcINTERNAL, + "cannot stat output_file: " + std::string(strerror(errno))); + + std::ofstream testWrite(filepath.c_str(), std::ios::out); + if (testWrite.fail()) + return rpcError( + rpcINTERNAL, + "output_file location is not writeable: " + + std::string(strerror(errno))); + testWrite.close(); + } + + std::ofstream outfile(filepath.c_str(), std::ios::out | std::ios::binary); + if (outfile.fail()) + return rpcError( + rpcINTERNAL, + "failed to open output_file: " + std::string(strerror(errno))); + + uint32_t min_ledger = context.params[jss::min_ledger].asUInt(); + uint32_t max_ledger = context.params[jss::max_ledger].asUInt(); + + if (min_ledger > max_ledger) + return rpcError(rpcINVALID_PARAMS, "min_ledger must be <= max_ledger"); + + // Initialize status tracking + { + std::unique_lock writeLock(catalogueStatusMutex); + catalogueRunStatus.isRunning = true; + catalogueRunStatus.started = std::chrono::system_clock::now(); + catalogueRunStatus.minLedger = min_ledger; + catalogueRunStatus.maxLedger = max_ledger; + catalogueRunStatus.ledgerUpto = + 0; // Initialize to 0 to indicate no progress yet + catalogueRunStatus.jobType = CatalogueJobType::CREATE; + catalogueRunStatus.filename = filepath; + catalogueRunStatus.compressionLevel = compressionLevel; + catalogueRunStatus.hash.clear(); // No hash yet + } + + // Create and write header with zero hash + CATLHeader header; + header.min_ledger = min_ledger; + header.max_ledger = max_ledger; + header.version = + makeCatalogueVersionField(CATALOGUE_VERSION, compressionLevel); + header.network_id = context.app.config().NETWORK_ID; + // hash is already zero-initialized + + 
outfile.write(reinterpret_cast(&header), sizeof(CATLHeader)); + if (outfile.fail()) + return rpcError( + rpcINTERNAL, + "failed to write header: " + std::string(strerror(errno))); + + auto compStream = std::make_unique(); + if (compressionLevel > 0) + { + JLOG(context.j.info()) + << "Setting up compression with level " << (int)compressionLevel; + + boost::iostreams::zlib_params params((int)compressionLevel); + params.window_bits = 15; + params.noheader = false; + compStream->push(boost::iostreams::zlib_compressor(params)); + } + else + { + JLOG(context.j.info()) + << "No compression (level 0), using direct output"; + } + compStream->push(boost::ref(outfile)); + + // Process ledgers with local processor implementation + auto writeToFile = [&compStream, &context](const void* data, size_t size) { + compStream->write(reinterpret_cast(data), size); + if (compStream->fail()) + { + JLOG(context.j.error()) + << "Failed to write to output file: " << std::strerror(errno); + return false; + } + return true; + }; + + // Modified outputLedger to work with individual ledgers instead of a vector + auto outputLedger = + [&writeToFile, &context, &compStream]( + std::shared_ptr ledger, + std::optional> prevStateMap = + std::nullopt) -> bool { + try + { + auto const& info = ledger->info(); + + uint64_t closeTime = info.closeTime.time_since_epoch().count(); + uint64_t parentCloseTime = + info.parentCloseTime.time_since_epoch().count(); + uint32_t closeTimeResolution = info.closeTimeResolution.count(); + uint64_t drops = info.drops.drops(); + + // Write ledger header information + if (!writeToFile(&info.seq, sizeof(info.seq)) || + !writeToFile(info.hash.data(), 32) || + !writeToFile(info.txHash.data(), 32) || + !writeToFile(info.accountHash.data(), 32) || + !writeToFile(info.parentHash.data(), 32) || + !writeToFile(&drops, sizeof(drops)) || + !writeToFile(&info.closeFlags, sizeof(info.closeFlags)) || + !writeToFile( + &closeTimeResolution, sizeof(closeTimeResolution)) || + 
!writeToFile(&closeTime, sizeof(closeTime)) || + !writeToFile(&parentCloseTime, sizeof(parentCloseTime))) + { + return false; + } + + size_t stateNodesWritten = + ledger->stateMap().serializeToStream(*compStream, prevStateMap); + size_t txNodesWritten = + ledger->txMap().serializeToStream(*compStream); + + JLOG(context.j.info()) << "Ledger " << info.seq << ": Wrote " + << stateNodesWritten << " state nodes, " + << "and " << txNodesWritten << " tx nodes"; + + return true; + } + catch (std::exception const& e) + { + JLOG(context.j.error()) << "Error processing ledger " + << ledger->info().seq << ": " << e.what(); + return false; + } + }; + + // Instead of loading all ledgers at once, process them in a sliding window + // of two + std::shared_ptr prevLedger = nullptr; + std::shared_ptr currLedger = nullptr; + uint32_t ledgers_written = 0; + + JLOG(context.j.info()) << "Starting to stream ledgers from " << min_ledger + << " to " << max_ledger; + + // Process the first ledger completely + { + UPDATE_CATALOGUE_STATUS(ledgerUpto, min_ledger); + + // Load the first ledger + auto status = RPC::getLedger(currLedger, min_ledger, context); + if (status.toErrorCode() != rpcSUCCESS) + return rpcError(status); + if (!currLedger) + return rpcError(rpcLEDGER_MISSING); + + if (!outputLedger(currLedger)) + return rpcError( + rpcINTERNAL, "Error occurred while processing first ledger"); + + ledgers_written++; + prevLedger = currLedger; + } + + // Process remaining ledgers with diffs + for (uint32_t ledger_seq = min_ledger + 1; ledger_seq <= max_ledger; + ++ledger_seq) + { + if (context.app.isStopping()) + return {}; + + // Update current ledger in status + UPDATE_CATALOGUE_STATUS(ledgerUpto, ledger_seq); + + // Load the next ledger + currLedger = nullptr; // Release any previous current ledger + auto status = RPC::getLedger(currLedger, ledger_seq, context); + if (status.toErrorCode() != rpcSUCCESS) + return rpcError(status); + if (!currLedger) + return rpcError(rpcLEDGER_MISSING); + + 
// Process with diff against previous ledger + if (!outputLedger(currLedger, prevLedger->stateMap())) + return rpcError( + rpcINTERNAL, "Error occurred while processing ledgers"); + + ledgers_written++; + + // Cycle the ledgers: current becomes previous, we'll load a new current + // next iteration + prevLedger = currLedger; + } + + // flush and finish + compStream->flush(); + compStream->reset(); + outfile.flush(); + outfile.close(); + + // Clear ledger references to release memory + prevLedger = nullptr; + currLedger = nullptr; + + // Get the file size and update it in the header + if (stat(filepath.c_str(), &st) != 0) + { + JLOG(context.j.warn()) + << "Could not get file size: " << std::strerror(errno); + return rpcError( + rpcINTERNAL, "failed to get file size for header update"); + } + + file_size = st.st_size; + + // Update header with filesize + JLOG(context.j.info()) << "Updating file size in header: " + << std::to_string(file_size) << " bytes"; + + header.filesize = file_size; + std::fstream updateFileSizeFile( + filepath.c_str(), std::ios::in | std::ios::out | std::ios::binary); + if (updateFileSizeFile.fail()) + return rpcError( + rpcINTERNAL, + "cannot open file for updating filesize: " + + std::string(strerror(errno))); + + updateFileSizeFile.seekp(0, std::ios::beg); + updateFileSizeFile.write( + reinterpret_cast(&header), sizeof(CATLHeader)); + updateFileSizeFile.close(); + + // Now compute the hash over the entire file + JLOG(context.j.info()) << "Computing catalogue hash..."; + + std::ifstream hashFile(filepath.c_str(), std::ios::in | std::ios::binary); + if (hashFile.fail()) + return rpcError( + rpcINTERNAL, + "cannot open file for hashing: " + std::string(strerror(errno))); + + // Initialize hasher + sha512_hasher hasher; + + // Create a buffer for reading + std::vector buffer(64 * 1024); // 64K buffer + + // Read and process the header portion + hashFile.read(buffer.data(), sizeof(CATLHeader)); + if (hashFile.gcount() != sizeof(CATLHeader)) + 
return rpcError(rpcINTERNAL, "failed to read header for hashing"); + + // Zero out the hash portion in the buffer for hash calculation + std::fill( + buffer.data() + offsetof(CATLHeader, hash), + buffer.data() + offsetof(CATLHeader, hash) + sizeof(header.hash), + 0); + + // Add the modified header to the hash + hasher(buffer.data(), sizeof(CATLHeader)); + + // Read and hash the rest of the file + while (hashFile) + { + hashFile.read(buffer.data(), buffer.size()); + std::streamsize bytes_read = hashFile.gcount(); + if (bytes_read > 0) + hasher(buffer.data(), bytes_read); + } + hashFile.close(); + + // Get the hash result + auto hash_result = static_cast(hasher); + + // Update the hash in the file + std::fstream updateFile( + filepath.c_str(), std::ios::in | std::ios::out | std::ios::binary); + if (updateFile.fail()) + return rpcError( + rpcINTERNAL, + "cannot open file for updating hash: " + + std::string(strerror(errno))); + + updateFile.seekp(offsetof(CATLHeader, hash), std::ios::beg); + updateFile.write( + reinterpret_cast(hash_result.data()), hash_result.size()); + updateFile.close(); + + // Convert hash to hex string + std::string hash_hex = toHexString(hash_result.data(), hash_result.size()); + + // Update status with hash and filesize + UPDATE_CATALOGUE_STATUS(hash, hash_hex); + UPDATE_CATALOGUE_STATUS(filesize, file_size); + + Json::Value jvResult; + jvResult[jss::min_ledger] = min_ledger; + jvResult[jss::max_ledger] = max_ledger; + jvResult[jss::output_file] = filepath; + jvResult[jss::file_size] = Json::UInt(file_size); + jvResult[jss::ledgers_written] = static_cast(ledgers_written); + jvResult[jss::status] = jss::success; + jvResult[jss::compression_level] = compressionLevel; + jvResult[jss::hash] = hash_hex; + + return jvResult; +} + +Json::Value +doCatalogueLoad(RPC::JsonContext& context) +{ + // Try to acquire write lock to check if an operation is running + { + std::unique_lock writeLock( + catalogueStatusMutex, std::try_to_lock); + if 
(!writeLock.owns_lock()) + { + // Couldn't get the lock, so another thread is accessing the status + // Try a shared lock to get the status + std::shared_lock readLock(catalogueStatusMutex); + return generateStatusJson(true); + } + + // We have the write lock, check if an operation is already running + if (catalogueRunStatus.isRunning) + { + return generateStatusJson(true); + } + + // No operation running, set up our operation + catalogueRunStatus.isRunning = true; + } + // Write lock is released here, allowing status checks while operation runs + + // Ensure we reset the running flag when we're done + struct OpCleanup + { + ~OpCleanup() + { + std::unique_lock writeLock(catalogueStatusMutex); + catalogueRunStatus.isRunning = false; + } + } opCleanup; + + if (!context.params.isMember(jss::input_file)) + return rpcError(rpcINVALID_PARAMS, "expected input_file"); + + // Check for ignore_hash parameter + bool ignore_hash = false; + if (context.params.isMember(jss::ignore_hash)) + ignore_hash = context.params[jss::ignore_hash].asBool(); + + std::string filepath = context.params[jss::input_file].asString(); + if (filepath.empty() || filepath.front() != '/') + return rpcError( + rpcINVALID_PARAMS, + "expected input_file: "); + + JLOG(context.j.info()) << "Opening catalogue file: " << filepath; + + // Check file size before attempting to read + struct stat st; + if (stat(filepath.c_str(), &st) != 0) + return rpcError( + rpcINTERNAL, + "cannot stat input_file: " + std::string(strerror(errno))); + + uint64_t file_size = st.st_size; + + // Minimal size check: at least a header must be present + if (file_size < sizeof(CATLHeader)) + return rpcError( + rpcINVALID_PARAMS, + "input_file too small (only " + std::to_string(file_size) + + " bytes), must be at least " + + std::to_string(sizeof(CATLHeader)) + " bytes"); + + JLOG(context.j.info()) << "Catalogue file size: " << file_size << " bytes"; + + // Check if file exists and is readable + std::ifstream infile(filepath.c_str(), 
std::ios::in | std::ios::binary); + if (infile.fail()) + return rpcError( + rpcINTERNAL, + "cannot open input_file: " + std::string(strerror(errno))); + + JLOG(context.j.info()) << "Reading catalogue header..."; + + // Read and validate header + CATLHeader header; + infile.read(reinterpret_cast(&header), sizeof(CATLHeader)); + if (infile.fail()) + return rpcError(rpcINTERNAL, "failed to read catalogue header"); + + if (header.magic != CATL) + return rpcError(rpcINVALID_PARAMS, "invalid catalogue file magic"); + + // Save the hash from the header + std::array stored_hash = header.hash; + std::string hash_hex = toHexString(stored_hash.data(), stored_hash.size()); + + // Extract version information + uint8_t version = getCatalogueVersion(header.version); + uint8_t compressionLevel = getCompressionLevel(header.version); + + // Initialize status tracking + { + std::unique_lock writeLock(catalogueStatusMutex); + catalogueRunStatus.isRunning = true; + catalogueRunStatus.started = std::chrono::system_clock::now(); + catalogueRunStatus.minLedger = header.min_ledger; + catalogueRunStatus.maxLedger = header.max_ledger; + catalogueRunStatus.ledgerUpto = + 0; // Initialize to 0 to indicate no progress yet + catalogueRunStatus.jobType = CatalogueJobType::LOAD; + catalogueRunStatus.filename = filepath; + catalogueRunStatus.compressionLevel = compressionLevel; + catalogueRunStatus.hash = hash_hex; + catalogueRunStatus.filesize = header.filesize; + } + + JLOG(context.j.info()) << "Catalogue version: " << (int)version; + JLOG(context.j.info()) << "Compression level: " << (int)compressionLevel; + JLOG(context.j.info()) << "Catalogue hash: " << hash_hex; + + // Check version compatibility + if (version > 1) // Only checking base version number + return rpcError( + rpcINVALID_PARAMS, + "unsupported catalogue version: " + std::to_string(version)); + + if (header.network_id != context.app.config().NETWORK_ID) + return rpcError( + rpcINVALID_PARAMS, + "catalogue network ID mismatch: " + + 
std::to_string(header.network_id)); + + // Check if actual filesize matches the one in the header + if (file_size != header.filesize) + { + JLOG(context.j.error()) + << "Catalogue file size mismatch. Header indicates " + << header.filesize << " bytes, but actual file size is " + << file_size << " bytes"; + return rpcError( + rpcINVALID_PARAMS, + "catalogue file size mismatch: expected " + + std::to_string(header.filesize) + " bytes, got " + + std::to_string(file_size) + " bytes"); + } + + JLOG(context.j.info()) << "Catalogue file size verified: " << file_size + << " bytes"; + + // Verify hash if not ignored + if (!ignore_hash && file_size > sizeof(CATLHeader)) + { + JLOG(context.j.info()) << "Verifying catalogue hash..."; + + // Close and reopen file for hash verification + infile.close(); + std::ifstream hashFile( + filepath.c_str(), std::ios::in | std::ios::binary); + if (hashFile.fail()) + return rpcError( + rpcINTERNAL, + "cannot reopen file for hash verification: " + + std::string(strerror(errno))); + + // Create a copy of the header with zeroed hash + CATLHeader hashHeader = header; + std::fill(hashHeader.hash.begin(), hashHeader.hash.end(), 0); + + // Initialize hasher + sha512_hasher hasher; + + // Add the modified header to the hash + hasher(&hashHeader, sizeof(CATLHeader)); + + // Read and hash the rest of the file + hashFile.seekg(sizeof(CATLHeader), std::ios::beg); + std::vector buffer(64 * 1024); // 64K buffer + while (hashFile) + { + if (context.app.isStopping()) + return {}; + + hashFile.read(buffer.data(), buffer.size()); + std::streamsize bytes_read = hashFile.gcount(); + if (bytes_read > 0) + hasher(buffer.data(), bytes_read); + } + hashFile.close(); + + // Get the computed hash + auto computed_hash = static_cast(hasher); + + // Compare with stored hash + if (!std::equal( + computed_hash.begin(), + computed_hash.end(), + stored_hash.begin())) + { + std::string computed_hex = + toHexString(computed_hash.data(), computed_hash.size()); + 
JLOG(context.j.error()) + << "Catalogue hash verification failed. Expected: " << hash_hex + << ", Computed: " << computed_hex; + return rpcError( + rpcINVALID_PARAMS, "catalogue hash verification failed"); + } + + JLOG(context.j.info()) << "Catalogue hash verified successfully"; + + // Reopen file for reading + infile.open(filepath.c_str(), std::ios::in | std::ios::binary); + if (infile.fail()) + return rpcError( + rpcINTERNAL, + "cannot reopen file after hash verification: " + + std::string(strerror(errno))); + + // Skip the header + infile.seekg(sizeof(CATLHeader), std::ios::beg); + } + + // Set up decompression if needed + auto decompStream = std::make_unique(); + if (compressionLevel > 0) + { + JLOG(context.j.info()) + << "Setting up decompression with level " << (int)compressionLevel; + boost::iostreams::zlib_params params((int)compressionLevel); + params.window_bits = 15; + params.noheader = false; + decompStream->push(boost::iostreams::zlib_decompressor(params)); + } + else + { + JLOG(context.j.info()) + << "No decompression needed (level 0), using direct input"; + } + decompStream->push(boost::ref(infile)); + + uint32_t ledgersLoaded = 0; + std::shared_ptr prevLedger; + uint32_t expected_seq = header.min_ledger; + + // Process each ledger sequentially + while (!decompStream->eof() && expected_seq <= header.max_ledger) + { + if (context.app.isStopping()) + return {}; + + // Update current ledger + UPDATE_CATALOGUE_STATUS(ledgerUpto, expected_seq); + + LedgerInfo info; + uint64_t closeTime = -1; + uint64_t parentCloseTime = -1; + uint32_t closeTimeResolution = -1; + uint64_t drops = -1; + + if (!decompStream->read( + reinterpret_cast(&info.seq), sizeof(info.seq)) || + !decompStream->read( + reinterpret_cast(info.hash.data()), 32) || + !decompStream->read( + reinterpret_cast(info.txHash.data()), 32) || + !decompStream->read( + reinterpret_cast(info.accountHash.data()), 32) || + !decompStream->read( + reinterpret_cast(info.parentHash.data()), 32) || + 
!decompStream->read( + reinterpret_cast(&drops), sizeof(drops)) || + !decompStream->read( + reinterpret_cast(&info.closeFlags), + sizeof(info.closeFlags)) || + !decompStream->read( + reinterpret_cast(&closeTimeResolution), + sizeof(closeTimeResolution)) || + !decompStream->read( + reinterpret_cast(&closeTime), sizeof(closeTime)) || + !decompStream->read( + reinterpret_cast(&parentCloseTime), + sizeof(parentCloseTime))) + { + JLOG(context.j.warn()) + << "Catalogue load expected but could not " + << "read the next ledger header at seq=" << expected_seq << ". " + << "Ledgers prior to this in the file (if any) were loaded."; + return rpcError(rpcINTERNAL, "Unexpected end of catalogue file."); + } + + info.closeTime = time_point{duration{closeTime}}; + info.parentCloseTime = time_point{duration{parentCloseTime}}; + info.closeTimeResolution = duration{closeTimeResolution}; + info.drops = drops; + + JLOG(context.j.info()) << "Found ledger " << info.seq << "..."; + + if (info.seq != expected_seq++) + { + JLOG(context.j.error()) + << "Expected ledger " << expected_seq << ", bailing"; + return rpcError( + rpcINTERNAL, + "Unexpected ledger out of sequence in catalogue file"); + } + + // Create a ledger object + std::shared_ptr ledger; + + if (info.seq == header.min_ledger) + { + // Base ledger - create a fresh one + ledger = std::make_shared( + info.seq, + info.closeTime, + context.app.config(), + context.app.getNodeFamily()); + + ledger->setLedgerInfo(info); + + // Deserialize the complete state map from leaf nodes + if (!ledger->stateMap().deserializeFromStream(*decompStream)) + { + JLOG(context.j.error()) + << "Failed to deserialize base ledger state"; + return rpcError( + rpcINTERNAL, "Failed to load base ledger state"); + } + } + else + { + // Delta ledger - start with a copy of the previous ledger + if (!prevLedger) + { + JLOG(context.j.error()) << "Missing previous ledger for delta"; + return rpcError(rpcINTERNAL, "Missing previous ledger"); + } + + auto snapshot = 
prevLedger->stateMap().snapShot(true); + + ledger = std::make_shared( + info, + context.app.config(), + context.app.getNodeFamily(), + *snapshot); + + // Apply delta (only leaf-node changes) + if (!ledger->stateMap().deserializeFromStream(*decompStream)) + { + JLOG(context.j.error()) + << "Failed to apply delta to ledger " << info.seq; + return rpcError(rpcINTERNAL, "Failed to apply ledger delta"); + } + } + + // pull in the tx map + if (!ledger->txMap().deserializeFromStream(*decompStream)) + { + JLOG(context.j.error()) + << "Failed to apply delta to ledger " << info.seq; + return rpcError(rpcINTERNAL, "Failed to apply ledger delta"); + } + + // Finalize the ledger + ledger->stateMap().flushDirty(hotACCOUNT_NODE); + ledger->txMap().flushDirty(hotTRANSACTION_NODE); + + ledger->setAccepted( + info.closeTime, + info.closeTimeResolution, + info.closeFlags & sLCF_NoConsensusTime); + + ledger->setValidated(); + ledger->setCloseFlags(info.closeFlags); + ledger->setImmutable(true); + + // we can double check the computed hashes now, since setImmutable + // recomputes the hashes + if (ledger->info().hash != info.hash) + { + JLOG(context.j.error()) + << "Ledger seq=" << info.seq + << " was loaded from catalogue, but computed hash does not " + "match. 
" + << "This ledger was not saved, and ledger loading from this " + "catalogue file ended here."; + return rpcError( + rpcINTERNAL, "Catalogue file contains a corrupted ledger."); + } + + // Save in database + pendSaveValidated(context.app, ledger, false, false); + + // Store in ledger master + context.app.getLedgerMaster().storeLedger(ledger, true); + + if (info.seq == header.max_ledger && + context.app.getLedgerMaster().getClosedLedger()->info().seq < + info.seq) + { + // Set as current ledger if this is the latest + context.app.getLedgerMaster().switchLCL(ledger); + } + + context.app.getLedgerMaster().setLedgerRangePresent( + header.min_ledger, info.seq, true); + + // Store the ledger + prevLedger = ledger; + ledgersLoaded++; + } + + decompStream->reset(); + infile.close(); + + JLOG(context.j.info()) << "Catalogue load complete! Loaded " + << ledgersLoaded << " ledgers from file size " + << file_size << " bytes"; + + Json::Value jvResult; + jvResult[jss::ledger_min] = header.min_ledger; + jvResult[jss::ledger_max] = header.max_ledger; + jvResult[jss::ledger_count] = + static_cast(header.max_ledger - header.min_ledger + 1); + jvResult[jss::ledgers_loaded] = static_cast(ledgersLoaded); + jvResult[jss::file_size] = Json::UInt(file_size); + jvResult[jss::status] = jss::success; + jvResult[jss::compression_level] = compressionLevel; + jvResult[jss::hash] = hash_hex; + jvResult[jss::ignore_hash] = ignore_hash; + + return jvResult; +} + +} // namespace ripple diff --git a/src/ripple/rpc/handlers/Handlers.h b/src/ripple/rpc/handlers/Handlers.h index 739c069f3..a297308da 100644 --- a/src/ripple/rpc/handlers/Handlers.h +++ b/src/ripple/rpc/handlers/Handlers.h @@ -170,6 +170,12 @@ Json::Value doValidatorListSites(RPC::JsonContext&); Json::Value doValidatorInfo(RPC::JsonContext&); +Json::Value +doCatalogueCreate(RPC::JsonContext&); +Json::Value +doCatalogueStatus(RPC::JsonContext&); +Json::Value +doCatalogueLoad(RPC::JsonContext&); } // namespace ripple #endif diff --git 
a/src/ripple/rpc/impl/Handler.cpp b/src/ripple/rpc/impl/Handler.cpp index 103c8622b..a3a605d66 100644 --- a/src/ripple/rpc/impl/Handler.cpp +++ b/src/ripple/rpc/impl/Handler.cpp @@ -174,6 +174,9 @@ Handler const handlerArray[]{ // Evented methods {"subscribe", byRef(&doSubscribe), Role::USER, NO_CONDITION}, {"unsubscribe", byRef(&doUnsubscribe), Role::USER, NO_CONDITION}, + {"catalogue_create", byRef(&doCatalogueCreate), Role::ADMIN, NO_CONDITION}, + {"catalogue_status", byRef(&doCatalogueStatus), Role::ADMIN, NO_CONDITION}, + {"catalogue_load", byRef(&doCatalogueLoad), Role::ADMIN, NO_CONDITION}, }; class HandlerTable diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 26d279dbd..a573425b7 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -570,6 +570,19 @@ getLedger(T& ledger, uint256 const& ledgerHash, Context& context) return Status::OK; } +// Helper function to determine if the types are compatible for assignment +template +struct is_assignable_shared_ptr : std::false_type +{ +}; + +template +struct is_assignable_shared_ptr< + std::shared_ptr&, + std::shared_ptr> : std::is_convertible +{ +}; + template Status getLedger(T& ledger, uint32_t ledgerIndex, Context& context) @@ -579,10 +592,16 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context) { if (context.app.config().reporting()) return {rpcLGR_NOT_FOUND, "ledgerNotFound"}; + auto cur = context.ledgerMaster.getCurrentLedger(); if (cur->info().seq == ledgerIndex) { - ledger = cur; + if constexpr (is_assignable_shared_ptr< + decltype(ledger), + decltype(cur)>::value) + { + ledger = cur; + } } } @@ -601,6 +620,9 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context) return Status::OK; } +#include +#include + template Status getLedger(T& ledger, LedgerShortcut shortcut, Context& context) @@ -635,7 +657,15 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) return { rpcLGR_NOT_FOUND, "Reporting does not 
track current ledger"}; - ledger = context.ledgerMaster.getCurrentLedger(); + auto cur = context.ledgerMaster.getCurrentLedger(); + + if constexpr (is_assignable_shared_ptr< + decltype(ledger), + decltype(cur)>::value) + { + ledger = cur; + } + assert(ledger->open()); } else if (shortcut == LedgerShortcut::CLOSED) @@ -685,6 +715,15 @@ getLedger<>( template Status getLedger<>(std::shared_ptr&, uint256 const&, Context&); +template Status +getLedger<>(std::shared_ptr&, uint32_t, Context&); + +template Status +getLedger<>(std::shared_ptr&, LedgerShortcut shortcut, Context&); + +template Status +getLedger<>(std::shared_ptr&, uint256 const&, Context&); + bool isValidated( LedgerMaster& ledgerMaster, diff --git a/src/ripple/shamap/SHAMap.h b/src/ripple/shamap/SHAMap.h index 2d1aa192f..e7d8c24d7 100644 --- a/src/ripple/shamap/SHAMap.h +++ b/src/ripple/shamap/SHAMap.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -365,6 +366,35 @@ public: void invariants() const; +public: + /** + * Serialize a SHAMap to a stream, optionally as a delta from another map + * Only leaf nodes are serialized since inner nodes can be reconstructed. + * + * @param stream The output stream to write to + * @param writtenNodes Set to track written node hashes to avoid duplicates + * @param baseSHAMap Optional base map to compute delta against + * @return Number of nodes written + */ + template + std::size_t + serializeToStream( + StreamType& stream, + std::optional> baseSHAMap = + std::nullopt) const; + + /** + * Deserialize a SHAMap from a stream + * Reconstructs the full tree from leaf nodes. 
+ * + * @param stream The input stream to read from + * @param baseSHAMap Optional base map to apply deltas to + * @return True if deserialization succeeded + */ + template + bool + deserializeFromStream(StreamType& stream); + private: using SharedPtrNodeStack = std::stack, SHAMapNodeID>>; diff --git a/src/ripple/shamap/SHAMapTreeNode.h b/src/ripple/shamap/SHAMapTreeNode.h index 8e351cce9..d8df3e8c0 100644 --- a/src/ripple/shamap/SHAMapTreeNode.h +++ b/src/ripple/shamap/SHAMapTreeNode.h @@ -43,11 +43,13 @@ static constexpr unsigned char const wireTypeInner = 2; static constexpr unsigned char const wireTypeCompressedInner = 3; static constexpr unsigned char const wireTypeTransactionWithMeta = 4; -enum class SHAMapNodeType { +enum SHAMapNodeType : uint8_t { tnINNER = 1, tnTRANSACTION_NM = 2, // transaction, no metadata tnTRANSACTION_MD = 3, // transaction, with metadata - tnACCOUNT_STATE = 4 + tnACCOUNT_STATE = 4, + tnREMOVE = 254, // special type to mark deleted nodes in serialization + tnTERMINAL = 255 // special type to mark the end of a serialization stream }; class SHAMapTreeNode diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index 1aab436c8..972cc64fb 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -1242,4 +1242,399 @@ SHAMap::invariants() const node->invariants(true); } +template +std::size_t +SHAMap::serializeToStream( + StreamType& stream, + std::optional> baseSHAMap) const +{ + // Static map to track bytes written to streams + static std::mutex streamMapMutex; + static std::unordered_map< + void*, + std::pair> + streamBytesWritten; + + // Flush threshold: 256 MiB + constexpr uint64_t flushThreshold = 256 * 1024 * 1024; + + // Local byte counter for this stream + uint64_t localBytesWritten = 0; + + // Single lambda that uses compile-time check for flush method existence + auto tryFlush = [](auto& s) { + if constexpr (requires(decltype(s) str) { str.flush(); }) + { + s.flush(); + } 
+ // No-op if flush doesn't exist - compiler will optimize this branch out + }; + + // Get the current bytes written from the global map (with lock) + { + std::lock_guard lock(streamMapMutex); + auto it = streamBytesWritten.find(static_cast(&stream)); + if (it != streamBytesWritten.end()) + { + localBytesWritten = it->second.first; + } + + // Random cleanup of old entries (while we have the lock) + if (!streamBytesWritten.empty()) + { + auto now = std::chrono::steady_clock::now(); + size_t randomIndex = std::rand() % streamBytesWritten.size(); + auto cleanupIt = std::next(streamBytesWritten.begin(), randomIndex); + + // If entry is older than 5 minutes, remove it + if (now - cleanupIt->second.second > std::chrono::minutes(5)) + { + streamBytesWritten.erase(cleanupIt); + } + } + } + + std::unordered_set> writtenNodes; + + if (!root_) + return 0; + + std::size_t nodeCount = 0; + + auto serializeLeaf = [&stream, + &localBytesWritten, + flushThreshold, + &tryFlush](SHAMapLeafNode const& node) -> bool { + // write the node type + auto t = node.getType(); + stream.write(reinterpret_cast(&t), 1); + localBytesWritten += 1; + + // write the key + auto const key = node.peekItem()->key(); + stream.write(reinterpret_cast(key.data()), 32); + localBytesWritten += 32; + + // write the data size + auto data = node.peekItem()->slice(); + uint32_t size = data.size(); + stream.write(reinterpret_cast(&size), 4); + localBytesWritten += 4; + + // write the data + stream.write(reinterpret_cast(data.data()), size); + localBytesWritten += size; + + // Check if we should flush without locking + if (localBytesWritten >= flushThreshold) + { + tryFlush(stream); + localBytesWritten = 0; + } + + return !stream.fail(); + }; + + auto serializeRemovedLeaf = [&stream, + &localBytesWritten, + flushThreshold, + &tryFlush](uint256 const& key) -> bool { + // to indicate a node is removed it is written with a removal type + auto t = SHAMapNodeType::tnREMOVE; + stream.write(reinterpret_cast(&t), 1); + 
localBytesWritten += 1; + + // write the key + stream.write(reinterpret_cast(key.data()), 32); + localBytesWritten += 32; + + // Check if we should flush without locking + if (localBytesWritten >= flushThreshold) + { + tryFlush(stream); + localBytesWritten = 0; + } + + return !stream.fail(); + }; + + // If we're creating a delta, first compute the differences + if (baseSHAMap && baseSHAMap->get().root_) + { + const SHAMap& baseMap = baseSHAMap->get(); + + // Only compute delta if the maps are different + if (getHash() != baseMap.getHash()) + { + Delta differences; + + if (compare(baseMap, differences, std::numeric_limits::max())) + { + // Process each difference + for (auto const& [key, deltaItem] : differences) + { + auto const& newItem = deltaItem.first; + auto const& oldItem = deltaItem.second; + + if (!oldItem && newItem) + { + // Added item + SHAMapLeafNode* leaf = findKey(key); + if (leaf && serializeLeaf(*leaf)) + ++nodeCount; + } + else if (oldItem && !newItem) + { + // Removed item + if (serializeRemovedLeaf(key)) + ++nodeCount; + } + else if ( + oldItem && newItem && + oldItem->slice() != newItem->slice()) + { + // Modified item + SHAMapLeafNode* leaf = findKey(key); + if (leaf && serializeLeaf(*leaf)) + ++nodeCount; + } + } + + // write a terminal symbol to indicate the map stream has ended + auto t = SHAMapNodeType::tnTERMINAL; + stream.write(reinterpret_cast(&t), 1); + localBytesWritten += 1; + + // Check if we should flush without locking + if (localBytesWritten >= flushThreshold) + { + tryFlush(stream); + localBytesWritten = 0; + } + + // Update the global counter at the end (with lock) + { + std::lock_guard lock(streamMapMutex); + auto& streamData = + streamBytesWritten[static_cast(&stream)]; + streamData.first = localBytesWritten; + streamData.second = std::chrono::steady_clock::now(); + } + + return nodeCount; + } + } + else + { + // Maps are identical, nothing to write + return 0; + } + } + + // Otherwise walk the entire tree and serialize all 
leaf nodes + std::function walkTree = + [&](SHAMapTreeNode const& node, SHAMapNodeID const& nodeID) { + if (node.isLeaf()) + { + auto const& leaf = static_cast(node); + auto const& hash = leaf.getHash(); + + // Avoid duplicates + if (writtenNodes.insert(hash).second) + { + if (serializeLeaf(leaf)) + ++nodeCount; + } + return; + } + + // It's an inner node, process its children + auto const& inner = static_cast(node); + for (int i = 0; i < branchFactor; ++i) + { + if (!inner.isEmptyBranch(i)) + { + auto const& childHash = inner.getChildHash(i); + + // Skip already written nodes + if (writtenNodes.find(childHash) != writtenNodes.end()) + continue; + + auto childNode = + descendThrow(const_cast(&inner), i); + if (childNode) + { + SHAMapNodeID childID = nodeID.getChildNodeID(i); + walkTree(*childNode, childID); + } + } + } + }; + + // Start walking from root + walkTree(*root_, SHAMapNodeID()); + + // write a terminal symbol to indicate the map stream has ended + auto t = SHAMapNodeType::tnTERMINAL; + stream.write(reinterpret_cast(&t), 1); + localBytesWritten += 1; + + // Check if we should flush one last time without locking + if (localBytesWritten >= flushThreshold) + { + tryFlush(stream); + localBytesWritten = 0; + } + + // Update the global counter at the end (with lock) + { + std::lock_guard lock(streamMapMutex); + auto& streamData = streamBytesWritten[static_cast(&stream)]; + streamData.first = localBytesWritten; + streamData.second = std::chrono::steady_clock::now(); + } + + return nodeCount; +} + +template +bool +SHAMap::deserializeFromStream(StreamType& stream) +{ + try + { + JLOG(journal_.info()) << "Deserialization: Starting to deserialize " + "from stream"; + + if (state_ != SHAMapState::Modifying && state_ != SHAMapState::Synching) + return false; + + if (!root_) + root_ = std::make_shared(cowid_); + + // Define a lambda to deserialize a leaf node + auto deserializeLeaf = + [this, &stream](SHAMapNodeType& nodeType /* out */) -> bool { + 
stream.read(reinterpret_cast(&nodeType), 1); + + if (nodeType == SHAMapNodeType::tnTERMINAL) + { + // end of map + return false; + } + + uint256 key; + uint32_t size{0}; + + stream.read(reinterpret_cast(key.data()), 32); + + if (stream.fail()) + { + JLOG(journal_.error()) + << "Deserialization: stream stopped unexpectedly " + << "while trying to read key of next entry"; + return false; + } + + if (nodeType == SHAMapNodeType::tnREMOVE) + { + // deletion + if (!hasItem(key)) + { + JLOG(journal_.error()) + << "Deserialization: removal of key " << to_string(key) + << " but key is already absent."; + return false; + } + delItem(key); + return true; + } + + stream.read(reinterpret_cast(&size), 4); + + if (stream.fail()) + { + JLOG(journal_.error()) + << "Deserialization: stream stopped unexpectedly" + << " while trying to read size of data for key " + << to_string(key); + return false; + } + + if (size > 1024 * 1024 * 1024) + { + JLOG(journal_.error()) + << "Deserialization: size of " << to_string(key) + << " is suspiciously large (" << size + << " bytes), bailing."; + return false; + } + + std::vector data; + data.resize(size); + + stream.read(reinterpret_cast(data.data()), size); + if (stream.fail()) + { + JLOG(journal_.error()) + << "Deserialization: Unexpected EOF while reading data for " + << to_string(key); + return false; + } + + auto item = make_shamapitem(key, makeSlice(data)); + if (hasItem(key)) + return updateGiveItem(nodeType, std::move(item)); + + return addGiveItem(nodeType, std::move(item)); + }; + + SHAMapNodeType lastParsed; + while (!stream.eof() && deserializeLeaf(lastParsed)) + ; + + if (lastParsed != SHAMapNodeType::tnTERMINAL) + { + JLOG(journal_.error()) + << "Deserialization: Unexpected EOF, terminal node not found."; + return false; + } + + // Flush any dirty nodes and update hashes + flushDirty(hotUNKNOWN); + + return true; + } + catch (std::exception const& e) + { + JLOG(journal_.error()) + << "Exception during deserialization: " << e.what(); 
+ return false; + } +} + +// explicit instantiation of templates for rpc::Catalogue + +using FilteringInputStream = boost::iostreams::filtering_stream< + boost::iostreams::input, + char, + std::char_traits, + std::allocator, + boost::iostreams::public_>; + +template bool +SHAMap::deserializeFromStream(FilteringInputStream&); + +using FilteringOutputStream = boost::iostreams::filtering_stream< + boost::iostreams::output, + char, + std::char_traits, + std::allocator, + boost::iostreams::public_>; + +template std::size_t +SHAMap::serializeToStream( + FilteringOutputStream&, + std::optional> baseSHAMap) const; + } // namespace ripple diff --git a/src/test/rpc/Catalogue_test.cpp b/src/test/rpc/Catalogue_test.cpp new file mode 100644 index 000000000..df326b1f3 --- /dev/null +++ b/src/test/rpc/Catalogue_test.cpp @@ -0,0 +1,865 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012-2017 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +#pragma pack(push, 1) // pack the struct tightly +struct TestCATLHeader +{ + uint32_t magic = 0x4C544143UL; + uint32_t min_ledger; + uint32_t max_ledger; + uint16_t version; + uint16_t network_id; + uint64_t filesize = 0; // Total size of the file including header + std::array hash = {}; // SHA-512 hash, initially set to zeros +}; +#pragma pack(pop) + +class Catalogue_test : public beast::unit_test::suite +{ + // Helper to create test ledger data with complex state changes + void + prepareLedgerData(test::jtx::Env& env, int numLedgers) + { + using namespace test::jtx; + Account alice{"alice"}; + Account bob{"bob"}; + Account charlie{"charlie"}; + + env.fund(XRP(10000), alice, bob, charlie); + env.close(); + + // Set up trust lines and issue currency + env(trust(bob, alice["USD"](1000))); + env(trust(charlie, bob["EUR"](1000))); + env.close(); + + env(pay(alice, bob, alice["USD"](500))); + env.close(); + + // Create and remove an offer to test state deletion + env(offer(bob, XRP(50), alice["USD"](1))); + auto offerSeq = + env.seq(bob) - 1; // Get the sequence of the offer we just created + env.close(); + + // Cancel the offer + env(offer_cancel(bob, offerSeq)); + env.close(); + + // Create another offer with same account + env(offer(bob, XRP(60), alice["USD"](2))); + env.close(); + + // Create a trust line and then remove it + env(trust(charlie, bob["EUR"](1000))); + env.close(); + env(trust(charlie, bob["EUR"](0))); + env.close(); + + // Recreate the same trust line + env(trust(charlie, bob["EUR"](2000))); + env.close(); + + // Additional ledgers with various transactions + for (int i = 0; i < numLedgers; ++i) + { + env(pay(alice, bob, XRP(100))); + env(offer(bob, XRP(50), alice["USD"](1))); + env.close(); + } + } + + void + testCatalogueCreateBadInput(FeatureBitset features) 
+ { + testcase("catalogue_create: Invalid parameters"); + using namespace test::jtx; + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + + // No parameters + { + auto const result = + env.client().invoke("catalogue_create", {})[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Missing min_ledger + { + Json::Value params{Json::objectValue}; + params[jss::max_ledger] = 20; + params[jss::output_file] = "/tmp/test.catl"; + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Missing max_ledger + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 10; + params[jss::output_file] = "/tmp/test.catl"; + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Missing output_file + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 10; + params[jss::max_ledger] = 20; + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Invalid output path (not absolute) + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 10; + params[jss::max_ledger] = 20; + params[jss::output_file] = "test.catl"; + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // min_ledger > max_ledger + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 20; + params[jss::max_ledger] = 10; + params[jss::output_file] = "/tmp/test.catl"; + auto const result = + 
env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + } + + void + testCatalogueCreate(FeatureBitset features) + { + testcase("catalogue_create: Basic functionality"); + using namespace test::jtx; + + // Create environment and some test ledgers + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + prepareLedgerData(env, 5); + + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + auto cataloguePath = (tempDir / "test.catl").string(); + + // Create catalogue + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 3; + params[jss::max_ledger] = 5; + params[jss::output_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + + BEAST_EXPECT(result[jss::status] == jss::success); + BEAST_EXPECT(result[jss::min_ledger] == 3); + BEAST_EXPECT(result[jss::max_ledger] == 5); + BEAST_EXPECT(result[jss::output_file] == cataloguePath); + BEAST_EXPECT(result[jss::file_size].asUInt() > 0); + BEAST_EXPECT(result[jss::ledgers_written].asUInt() == 3); + + // Verify file exists and is not empty + BEAST_EXPECT(boost::filesystem::exists(cataloguePath)); + BEAST_EXPECT(boost::filesystem::file_size(cataloguePath) > 0); + + boost::filesystem::remove_all(tempDir); + } + + void + testCatalogueLoadBadInput(FeatureBitset features) + { + testcase("catalogue_load: Invalid parameters"); + using namespace test::jtx; + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + + // No parameters + { + auto const result = + env.client().invoke("catalogue_load", {})[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Missing input_file + { + Json::Value params{Json::objectValue}; + 
auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Invalid input path (not absolute) + { + Json::Value params{Json::objectValue}; + params[jss::input_file] = "test.catl"; + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + + // Non-existent file + { + Json::Value params{Json::objectValue}; + params[jss::input_file] = "/tmp/nonexistent.catl"; + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::error] == "internal"); + BEAST_EXPECT(result[jss::status] == "error"); + } + } + + void + testCatalogueLoadAndVerify(FeatureBitset features) + { + testcase("catalogue_load: Load and verify"); + using namespace test::jtx; + + // Create environment and test data + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + prepareLedgerData(env, 5); + + // Store some key state information before catalogue creation + auto const sourceLedger = env.closed(); + auto const bobKeylet = keylet::account(Account("bob").id()); + auto const charlieKeylet = keylet::account(Account("charlie").id()); + auto const eurTrustKeylet = keylet::line( + Account("charlie").id(), + Account("bob").id(), + Currency(to_currency("EUR"))); + + // Get original state entries + auto const bobAcct = sourceLedger->read(bobKeylet); + auto const charlieAcct = sourceLedger->read(charlieKeylet); + auto const eurTrust = sourceLedger->read(eurTrustKeylet); + + BEAST_EXPECT(bobAcct != nullptr); + BEAST_EXPECT(charlieAcct != nullptr); + BEAST_EXPECT(eurTrust != nullptr); + + BEAST_EXPECT( + eurTrust->getFieldAmount(sfLowLimit).mantissa() == + 2000000000000000ULL); + + // Get initial complete_ledgers range + auto const originalCompleteLedgers = + 
env.app().getLedgerMaster().getCompleteLedgers(); + + // Create temporary directory for test files + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + auto cataloguePath = (tempDir / "test.catl").string(); + + // First create a catalogue + uint32_t minLedger = 3; + uint32_t maxLedger = sourceLedger->info().seq; + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = minLedger; + params[jss::max_ledger] = maxLedger; + params[jss::output_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == jss::success); + } + + // Create a new environment for loading with unique port + Env loadEnv{ + *this, + test::jtx::envconfig(test::jtx::port_increment, 3), + features, + nullptr, + beast::severities::kInfo}; + + // Now load the catalogue + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + + auto const result = + loadEnv.client().invoke("catalogue_load", params)[jss::result]; + + BEAST_EXPECT(result[jss::status] == jss::success); + BEAST_EXPECT(result[jss::ledger_min] == minLedger); + BEAST_EXPECT(result[jss::ledger_max] == maxLedger); + BEAST_EXPECT(result[jss::ledger_count] == (maxLedger - minLedger + 1)); + + // Verify complete_ledgers reflects loaded ledgers + auto const newCompleteLedgers = + loadEnv.app().getLedgerMaster().getCompleteLedgers(); + + BEAST_EXPECT(newCompleteLedgers == originalCompleteLedgers); + + // Verify the loaded state matches the original + auto const loadedLedger = loadEnv.closed(); + + // After loading each ledger + + // Compare all ledgers from 3 to 16 inclusive + for (std::uint32_t seq = 3; seq <= 16; ++seq) + { + auto const sourceLedger = + env.app().getLedgerMaster().getLedgerByHash( + env.app().getLedgerMaster().getHashBySeq(seq)); + + auto const loadedLedger = + 
loadEnv.app().getLedgerMaster().getLedgerByHash( + loadEnv.app().getLedgerMaster().getHashBySeq(seq)); + + if (!sourceLedger || !loadedLedger) + { + BEAST_EXPECT(false); // Test failure + continue; + } + + // Check basic ledger properties + BEAST_EXPECT(sourceLedger->info().seq == loadedLedger->info().seq); + BEAST_EXPECT( + sourceLedger->info().hash == loadedLedger->info().hash); + BEAST_EXPECT( + sourceLedger->info().txHash == loadedLedger->info().txHash); + BEAST_EXPECT( + sourceLedger->info().accountHash == + loadedLedger->info().accountHash); + BEAST_EXPECT( + sourceLedger->info().parentHash == + loadedLedger->info().parentHash); + BEAST_EXPECT( + sourceLedger->info().drops == loadedLedger->info().drops); + + // Check time-related properties + BEAST_EXPECT( + sourceLedger->info().closeFlags == + loadedLedger->info().closeFlags); + BEAST_EXPECT( + sourceLedger->info().closeTimeResolution.count() == + loadedLedger->info().closeTimeResolution.count()); + BEAST_EXPECT( + sourceLedger->info().closeTime.time_since_epoch().count() == + loadedLedger->info().closeTime.time_since_epoch().count()); + BEAST_EXPECT( + sourceLedger->info() + .parentCloseTime.time_since_epoch() + .count() == + loadedLedger->info() + .parentCloseTime.time_since_epoch() + .count()); + + // Check validation state + BEAST_EXPECT( + sourceLedger->info().validated == + loadedLedger->info().validated); + BEAST_EXPECT( + sourceLedger->info().accepted == loadedLedger->info().accepted); + + // Check SLE counts + std::size_t sourceCount = 0; + std::size_t loadedCount = 0; + + for (auto const& sle : sourceLedger->sles) + { + sourceCount++; + } + + for (auto const& sle : loadedLedger->sles) + { + loadedCount++; + } + + BEAST_EXPECT(sourceCount == loadedCount); + + // Check existence of imported keylets + for (auto const& sle : sourceLedger->sles) + { + auto const key = sle->key(); + bool exists = loadedLedger->exists(keylet::unchecked(key)); + BEAST_EXPECT(exists); + + // If it exists, check the 
serialized form matches + if (exists) + { + auto loadedSle = loadedLedger->read(keylet::unchecked(key)); + Serializer s1, s2; + sle->add(s1); + loadedSle->add(s2); + bool serializedEqual = (s1.peekData() == s2.peekData()); + BEAST_EXPECT(serializedEqual); + } + } + + // Check for extra keys in loaded ledger that aren't in source + for (auto const& sle : loadedLedger->sles) + { + auto const key = sle->key(); + BEAST_EXPECT(sourceLedger->exists(keylet::unchecked(key))); + } + } + + auto const loadedBobAcct = loadedLedger->read(bobKeylet); + auto const loadedCharlieAcct = loadedLedger->read(charlieKeylet); + auto const loadedEurTrust = loadedLedger->read(eurTrustKeylet); + + BEAST_EXPECT(!!loadedBobAcct); + BEAST_EXPECT(!!loadedCharlieAcct); + BEAST_EXPECT(!!loadedEurTrust); + + // Compare the serialized forms of the state objects + bool const loaded = + loadedBobAcct && loadedCharlieAcct && loadedEurTrust; + + Serializer s1, s2; + if (loaded) + { + bobAcct->add(s1); + loadedBobAcct->add(s2); + } + BEAST_EXPECT(loaded && s1.peekData() == s2.peekData()); + + if (loaded) + { + s1.erase(); + s2.erase(); + charlieAcct->add(s1); + loadedCharlieAcct->add(s2); + } + BEAST_EXPECT(loaded && s1.peekData() == s2.peekData()); + + if (loaded) + { + s1.erase(); + s2.erase(); + eurTrust->add(s1); + loadedEurTrust->add(s2); + } + + BEAST_EXPECT(loaded && s1.peekData() == s2.peekData()); + + // Verify trust line amount matches + BEAST_EXPECT( + loaded && + loadedEurTrust->getFieldAmount(sfLowLimit).mantissa() == + 2000000000000000ULL); + + boost::filesystem::remove_all(tempDir); + } + + void + testNetworkMismatch(FeatureBitset features) + { + testcase("catalogue_load: Network ID mismatch"); + using namespace test::jtx; + + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + auto cataloguePath = (tempDir / "test.catl").string(); + + // Create environment with different 
network IDs + { + Env env1{ + *this, + envconfig([](std::unique_ptr cfg) { + cfg->NETWORK_ID = 123; + return cfg; + }), + features, + nullptr, + beast::severities::kInfo}; + prepareLedgerData(env1, 5); + + // Create catalogue with network ID 123 + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 3; + params[jss::max_ledger] = 5; + params[jss::output_file] = cataloguePath; + + auto const result = env1.client().invoke( + "catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == jss::success); + } + } + + { + // Try to load catalogue in environment with different network ID + Env env2{ + *this, + envconfig([](std::unique_ptr cfg) { + cfg->NETWORK_ID = 456; + return cfg; + }), + features, + nullptr, + beast::severities::kInfo}; + + { + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + + auto const result = + env2.client().invoke("catalogue_load", params)[jss::result]; + + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT(result[jss::status] == "error"); + } + } + boost::filesystem::remove_all(tempDir); + } + + void + testCatalogueHashVerification(FeatureBitset features) + { + testcase("catalogue_load: Hash verification"); + using namespace test::jtx; + + // Create environment and test data + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + prepareLedgerData(env, 3); + + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + auto cataloguePath = (tempDir / "test.catl").string(); + + // Create catalogue + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 3; + params[jss::max_ledger] = 5; + params[jss::output_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == jss::success); + BEAST_EXPECT(result.isMember(jss::hash)); + 
std::string originalHash = result[jss::hash].asString(); + BEAST_EXPECT(!originalHash.empty()); + } + + // Test 1: Successful hash verification (normal load) + { + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == jss::success); + BEAST_EXPECT(result.isMember(jss::hash)); + } + + // Test 2: Corrupt the file and test hash mismatch detection + { + // Modify a byte in the middle of the file to cause hash mismatch + std::fstream file( + cataloguePath, std::ios::in | std::ios::out | std::ios::binary); + BEAST_EXPECT(file.good()); + + // Skip header and modify a byte + file.seekp(sizeof(TestCATLHeader) + 100, std::ios::beg); + char byte = 0xFF; + file.write(&byte, 1); + file.close(); + + // Try to load the corrupted file + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == "error"); + BEAST_EXPECT(result[jss::error] == "invalidParams"); + BEAST_EXPECT( + result[jss::error_message].asString().find( + "hash verification failed") != std::string::npos); + } + + // Test 3: Test ignore_hash parameter + { + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + params[jss::ignore_hash] = true; + + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + // This might still fail due to data corruption, but not because of + // hash verification The important part is that it didn't + // immediately reject due to hash + if (result[jss::status] == "error") + { + BEAST_EXPECT( + result[jss::error_message].asString().find( + "hash verification failed") == std::string::npos); + } + } + + boost::filesystem::remove_all(tempDir); + } + + void + testCatalogueFileSize(FeatureBitset features) + { + testcase("catalogue_load: File 
size verification"); + using namespace test::jtx; + + // Create environment and test data + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + prepareLedgerData(env, 3); + + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + auto cataloguePath = (tempDir / "test.catl").string(); + + // Create catalogue + { + Json::Value params{Json::objectValue}; + params[jss::min_ledger] = 3; + params[jss::max_ledger] = 5; + params[jss::output_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_create", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == jss::success); + BEAST_EXPECT(result.isMember(jss::file_size)); + uint64_t originalSize = result[jss::file_size].asUInt(); + BEAST_EXPECT(originalSize > 0); + } + + // Test 1: Successful file size verification (normal load) + { + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == jss::success); + BEAST_EXPECT(result.isMember(jss::file_size)); + } + + // Test 2: Modify file size in header to cause mismatch + { + // Modify the filesize in the header to cause mismatch + std::fstream file( + cataloguePath, std::ios::in | std::ios::out | std::ios::binary); + BEAST_EXPECT(file.good()); + + file.seekp(offsetof(TestCATLHeader, filesize), std::ios::beg); + uint64_t wrongSize = 12345; // Some arbitrary wrong size + file.write( + reinterpret_cast(&wrongSize), sizeof(wrongSize)); + file.close(); + + // Try to load the modified file + Json::Value params{Json::objectValue}; + params[jss::input_file] = cataloguePath; + + auto const result = + env.client().invoke("catalogue_load", params)[jss::result]; + BEAST_EXPECT(result[jss::status] == "error"); + BEAST_EXPECT(result[jss::error] == "invalidParams"); + 
BEAST_EXPECT( + result[jss::error_message].asString().find( + "file size mismatch") != std::string::npos); + } + + boost::filesystem::remove_all(tempDir); + } + + void + testCatalogueCompression(FeatureBitset features) + { + testcase("catalogue: Compression levels"); + using namespace test::jtx; + + // Create environment and test data + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + prepareLedgerData(env, 5); + + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + std::vector> compressionTests = { + {"no_compression", Json::Value(0)}, // Level 0 (none) + {"min_compression", Json::Value(1)}, // Level 1 (minimal) + {"default_compression", Json::Value(6)}, // Level 6 (default) + {"max_compression", Json::Value(9)}, // Level 9 (maximum) + {"boolean_true_compression", + Json::Value(true)} // Boolean true (should use default level 6) + }; + + uint64_t prevSize = 0; + for (const auto& test : compressionTests) + { + std::string testName = test.first; + Json::Value compressionLevel = test.second; + + auto cataloguePath = (tempDir / (testName + ".catl")).string(); + + // Create catalogue with specific compression level + Json::Value createParams{Json::objectValue}; + createParams[jss::min_ledger] = 3; + createParams[jss::max_ledger] = 10; + createParams[jss::output_file] = cataloguePath; + createParams[jss::compression_level] = compressionLevel; + + auto createResult = env.client().invoke( + "catalogue_create", createParams)[jss::result]; + + BEAST_EXPECT(createResult[jss::status] == jss::success); + + uint64_t fileSize = createResult[jss::file_size].asUInt(); + BEAST_EXPECT(fileSize > 0); + + // Load the catalogue to verify it works + Json::Value loadParams{Json::objectValue}; + loadParams[jss::input_file] = cataloguePath; + + auto loadResult = + env.client().invoke("catalogue_load", loadParams)[jss::result]; + 
BEAST_EXPECT(loadResult[jss::status] == jss::success); + + // For levels > 0, verify size is smaller than uncompressed (or at + // least not larger) + if (prevSize > 0 && compressionLevel.asUInt() > 0) + { + BEAST_EXPECT(fileSize <= prevSize); + } + + // Store size for comparison with next level + if (compressionLevel.asUInt() == 0) + { + prevSize = fileSize; + } + + // Verify compression level in response + if (compressionLevel.isBool() && compressionLevel.asBool()) + { + BEAST_EXPECT( + createResult[jss::compression_level].asUInt() == 6); + } + else + { + BEAST_EXPECT( + createResult[jss::compression_level].asUInt() == + compressionLevel.asUInt()); + } + } + + boost::filesystem::remove_all(tempDir); + } + + void + testCatalogueStatus(FeatureBitset features) + { + testcase("catalogue_status: Status reporting"); + using namespace test::jtx; + + // Create environment + Env env{ + *this, envconfig(), features, nullptr, beast::severities::kInfo}; + + boost::filesystem::path tempDir = + boost::filesystem::temp_directory_path() / + boost::filesystem::unique_path(); + boost::filesystem::create_directories(tempDir); + + auto cataloguePath = (tempDir / "test.catl").string(); + + // Test 1: Check status when no job is running + { + auto result = env.client().invoke( + "catalogue_status", Json::objectValue)[jss::result]; + std::cout << to_string(result) << "\n"; + BEAST_EXPECT(result[jss::job_status] == "no_job_running"); + } + + // TODO: add a parallel job test here... 
if anyone feels thats actually + // needed + + boost::filesystem::remove_all(tempDir); + } + +public: + void + run() override + { + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + testCatalogueCreateBadInput(all); + testCatalogueCreate(all); + testCatalogueLoadBadInput(all); + testCatalogueLoadAndVerify(all); + testNetworkMismatch(all); + testCatalogueHashVerification(all); + testCatalogueFileSize(all); + testCatalogueCompression(all); + testCatalogueStatus(all); + } +}; + +BEAST_DEFINE_TESTSUITE(Catalogue, rpc, ripple); + +} // namespace ripple From d546d761ce6d116b5601d147077e098257dd3dc0 Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Tue, 1 Apr 2025 18:00:13 +0700 Subject: [PATCH 31/33] Fix using using Status with rpcError (#484) --- src/ripple/rpc/handlers/Catalogue.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/ripple/rpc/handlers/Catalogue.cpp b/src/ripple/rpc/handlers/Catalogue.cpp index 28768d50d..a5f159c08 100644 --- a/src/ripple/rpc/handlers/Catalogue.cpp +++ b/src/ripple/rpc/handlers/Catalogue.cpp @@ -549,9 +549,8 @@ doCatalogueCreate(RPC::JsonContext& context) UPDATE_CATALOGUE_STATUS(ledgerUpto, min_ledger); // Load the first ledger - auto status = RPC::getLedger(currLedger, min_ledger, context); - if (status.toErrorCode() != rpcSUCCESS) - return rpcError(status); + if (auto error = RPC::getLedger(currLedger, min_ledger, context)) + return rpcError(error.toErrorCode(), error.message()); if (!currLedger) return rpcError(rpcLEDGER_MISSING); @@ -575,9 +574,8 @@ doCatalogueCreate(RPC::JsonContext& context) // Load the next ledger currLedger = nullptr; // Release any previous current ledger - auto status = RPC::getLedger(currLedger, ledger_seq, context); - if (status.toErrorCode() != rpcSUCCESS) - return rpcError(status); + if (auto error = RPC::getLedger(currLedger, ledger_seq, context)) + return rpcError(error.toErrorCode(), error.message()); if (!currLedger) return 
rpcError(rpcLEDGER_MISSING); From c4b5ae37877f3b75061c9c98de9512d11bb0e86a Mon Sep 17 00:00:00 2001 From: Niq Dudfield Date: Fri, 4 Apr 2025 09:53:45 +0700 Subject: [PATCH 32/33] Fix missing includes in Catalogue.cpp for non-unity builds (#485) --- src/ripple/rpc/handlers/Catalogue.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ripple/rpc/handlers/Catalogue.cpp b/src/ripple/rpc/handlers/Catalogue.cpp index a5f159c08..8a8f76d4d 100644 --- a/src/ripple/rpc/handlers/Catalogue.cpp +++ b/src/ripple/rpc/handlers/Catalogue.cpp @@ -18,7 +18,9 @@ //============================================================================== #include +#include #include +#include #include #include #include From 2fb5c921406249336e9c9a97247a376e2ef34d9f Mon Sep 17 00:00:00 2001 From: tequ Date: Sat, 5 Apr 2025 02:32:47 +0900 Subject: [PATCH 33/33] feat: Run unittests in parallel with Github Actions (#483) Implement parallel execution for unit tests using Github Actions to improve CI pipeline efficiency and reduce build times. --- docker-unit-tests.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) mode change 100644 => 100755 docker-unit-tests.sh diff --git a/docker-unit-tests.sh b/docker-unit-tests.sh old mode 100644 new mode 100755 index 6bfde48c0..8406ba86a --- a/docker-unit-tests.sh +++ b/docker-unit-tests.sh @@ -1,4 +1,11 @@ -#!/bin/bash +#!/bin/bash -x + +BUILD_CORES=$(echo "scale=0 ; `nproc` / 1.337" | bc) + +if [[ "$GITHUB_REPOSITORY" == "" ]]; then + #Default + BUILD_CORES=8 +fi echo "Mounting $(pwd)/io in ubuntu and running unit tests" -docker run --rm -i -v $(pwd):/io ubuntu sh -c '/io/release-build/xahaud -u' +docker run --rm -i -v $(pwd):/io -e BUILD_CORES=$BUILD_CORES ubuntu sh -c '/io/release-build/xahaud --unittest-jobs $BUILD_CORES -u'